repo_name (string, length 6-112) | path (string, length 4-204) | copies (string, length 1-3) | size (string, length 4-6) | content (string, length 714-810k) | license (string, 15 classes)
---|---|---|---|---|---|
daltonmaag/freetype-py | examples/glyph-outline.py | 3 | 1282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph outline rendering
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
face = Face('./Vera.ttf')
face.set_char_size( 4*48*64 )
flags = FT_LOAD_DEFAULT | FT_LOAD_NO_BITMAP
face.load_char('S', flags )
slot = face.glyph
glyph = slot.get_glyph()
stroker = Stroker( )
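# Stroker.set() takes the stroke radius in 26.6 fixed-point units, so 64 corresponds to a 1-pixel outline.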
stroker.set(64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 )
glyph.stroke( stroker )
blyph = glyph.to_bitmap(FT_RENDER_MODE_NORMAL, Vector(0,0))
bitmap = blyph.bitmap
width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
top, left = blyph.top, blyph.left
data = []
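# bitmap.pitch is the number of bytes per stored row, which may exceed the glyph
# width; copy only the first `width` bytes of each row into a flat list.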
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
plt.figure(figsize=(6,8))
plt.imshow(Z, interpolation='nearest', cmap=plt.cm.gray_r, origin='lower')
plt.show()
| bsd-3-clause |
madgik/exareme | Exareme-Docker/src/mip-algorithms/tests/algorithm_tests_with_privacy/test_Cart.py | 1 | 12128 | import requests
import unittest
import os,sys
import json
import logging
import math
from scipy.stats import binom_test
from decimal import *
import csv
import pandas as pd
from sklearn import tree
from sklearn.tree import export_graphviz
from sklearn.tree import DecisionTreeClassifier
import itertools
import numpy as np
from sklearn import metrics
#from sklearn.model_selection import train_test_split
from scipy import stats
from os import path
sys.path.append(path.abspath(__file__))
from tests import vm_url
endpointUrl_CartTraining= vm_url + 'CART'
#endpointUrl_CartPredict= vm_url + 'CART_PREDICT'
path = '../data/dementia/'
from cartPredict_lib import cart_1_local, cart_1_global
# argsX = "montrealcognitiveassessment,"
# argsX +="rightpcggposteriorcingulategyrus,leftpcggposteriorcingulategyrus,"
# argsX +="rightacgganteriorcingulategyrus,"
# argsX +="leftmcggmiddlecingulategyrus,"
# argsX +="rightphgparahippocampalgyrus,"
# argsX +="rightententorhinalarea,"
# argsX +="righthippocampus,lefthippocampus,"
# argsX+= "rightthalamusproper,leftthalamusproper"
def run_sklearn_classification(trainingDatasetPath, PredictionDatasetPath, argsX, argsY, maxDepth):
#1. Read training data
df = pd.read_csv(trainingDatasetPath)
args_X = list(argsX.replace(" ", "").split(","))
args_Y = [argsY.replace(" ", "")][0]
varNames = [x for x in args_X]
varNames.append(args_Y)
df = df[varNames]
df = df.dropna()
X = df.drop(args_Y, axis=1)
y = df[args_Y]
#2. Run scikit tree classifier
clf = DecisionTreeClassifier(random_state=0, max_depth = int(maxDepth))
clf = clf.fit(X, y)
#3. Read testing dataset
df = pd.read_csv(PredictionDatasetPath)
varNames.append("IdForTesting")
df = df[varNames]
df = df.dropna()
X = df.drop(args_Y, axis=1)
y = df[args_Y]
#4. Run scikit predictor
mydata = []
ydata = []
for ind in df.index:
rowNew =[df[x][ind] for x in args_X]
mydata.append(rowNew)
ydata.append(y[ind])
predictedValues=list(clf.predict(mydata))
df["goldStandardImplementationResult"] = predictedValues
return df
def run_sklearn_regression(trainingDatasetPath, PredictionDatasetPath, argsX, argsY, maxDepth):
#1. Read training dataset
df = pd.read_csv(trainingDatasetPath)#, index_col ="subjectcode")
args_X = list(argsX.replace(" ", "").split(","))
args_Y = [argsY.replace(" ", "")][0]
varNames = [x for x in args_X]
varNames.append(args_Y)
df = df[varNames]
df = df.dropna()
X = df.drop(args_Y, axis=1)
y = df[args_Y]
#2. Fit regression model
regr = tree.DecisionTreeRegressor(random_state=0, max_depth = int(maxDepth))
regr = regr.fit(X, y)
#3. Read testing dataset
df = pd.read_csv(PredictionDatasetPath)#, index_col ="subjectcode")
varNames.append("IdForTesting")
df = df[varNames]
df = df.dropna()
X = df.drop(args_Y, axis=1)
y = df[args_Y]
#4. Predict
mydata = []
ydata = []
for ind in df.index:
rowNew =[df[x][ind] for x in args_X]
mydata.append(rowNew)
ydata.append(y[ind])
predictedValues=list(regr.predict(mydata))
df["goldStandardImplementationResult"] = predictedValues
return df
#PredictionDatasetPath, argsX, argsY, args_globalTreeJ, CategoricalVariables = path+"diabetes_testing.csv", data_CartPredict[0]['value'], data_CartPredict[1]['value'],exaremeResult_Training, {'Outcome': [u'0', u'1']}
def ExaremePredictSimulation(PredictionDatasetPath, argsX, argsY, args_globalTreeJ, categoricalVariables):
df = pd.read_csv(PredictionDatasetPath)#, index_col ="subjectcode")
args_X = list(argsX.replace(" ", "").split(","))
args_Y = [argsY.replace(" ", "")][0]
varNames = [x for x in args_X]
varNames.append(args_Y)
df = df[varNames]
df = df.dropna()
X = df.drop(args_Y, axis=1)
y = df[args_Y]
#CategoricalVariables = {'Outcome': [u'0', u'1']}
dataSchema = varNames
if args_Y in categoricalVariables:
df[args_Y] = df[args_Y].astype(str)
confusionMatrix, mse, counts, predictions = cart_1_local(df, dataSchema, categoricalVariables, args_X, [args_Y], args_globalTreeJ)
global_out = cart_1_global(args_X, args_Y, categoricalVariables, confusionMatrix, mse, counts, predictions)
return global_out
class Test_Cart(unittest.TestCase):
def test_Cart_1(self):
#Run Exareme Cart Training Algorithm
logging.info("---------- TEST 1: CART - classification tree on diabetes")
data_Cart = [{ "name": "x", "value": "Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age"},
{"name": "y", "value": "Outcome"},
{"name": "max_depth", "value": "200"},
{"name": "no_split_points","value": "50"},
{"name": "pathology","value":"dementia"},
{"name": "dataset", "value": "diabetes_training"},
{"name": "filter", "value": ""}]
headers = {'Content-type': 'application/json', "Accept": "text/plain"}
r = requests.post(endpointUrl_CartTraining,data=json.dumps(data_Cart),headers=headers)
exaremeResult_Training = json.loads(r.text)
print ("exaremeResult_Training", r.text)
####################################################################
# #Run Exareme Cart Prediction Algorithm
data_CartPredict = [{ "name": "x", "value": "Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age,IdForTesting"},
{"name": "y", "value": "Outcome"},
{"name": "treeJson", "value": ""},
{"name": "treeFile", "value": "tree.txt"},
{"name": "pathology","value":"dementia"},
{"name": "dataset", "value": "diabetes_testing"},
{"name": "filter", "value": ""}]
# headers = {'Content-type': 'application/json', "Accept": "text/plain"}
# r = requests.post(endpointUrl_CartPredict,data=json.dumps(data_CartPredict),headers=headers)
# exaremeResult_Predict = json.loads(r.text)
r = ExaremePredictSimulation(path+"diabetes_testing.csv", data_CartPredict[0]['value'], data_CartPredict[1]['value'],exaremeResult_Training['result'][0]['data'], {'Outcome': [u'0', u'1']})
exaremeResult_Predict = json.loads(r)
print (exaremeResult_Predict)
####################################################################
#Run Python
data_sklearn = {"trainingDataset": "diabetes_training",
"testingDataset": "diabetes_testing",
"x": "Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age",
"y": "Outcome",
"max_depth": 200}
sklearnResult = run_sklearn_classification( path+data_sklearn['trainingDataset']+".csv",
path+data_sklearn['testingDataset']+".csv",
data_sklearn['x'], data_sklearn['y'], data_sklearn['max_depth'])
#CompareResults
exaremeResult = pd.DataFrame.from_dict(exaremeResult_Predict["result"][1]["data"])
joinTbls = pd.merge(exaremeResult, sklearnResult, left_on = "ids", right_on = "IdForTesting")
TT = TF = FT = FF = 0
for ind in joinTbls.index:
if int(joinTbls['predictions'][ind]) == joinTbls[data_sklearn['y']][ind] : #MIP correct
if joinTbls['goldStandardImplementationResult'][ind] == joinTbls[data_sklearn['y']][ind]: #GS correct
TT = TT + 1
else: TF =TF + 1
else: #MIP not correct
if joinTbls['goldStandardImplementationResult'][ind] == joinTbls[data_sklearn['y']][ind]: #GS correct
FT = FT + 1
else: FF = FF + 1
print (TT,TF,FT,FF)
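# Sign test on the discordant predictions only: FT counts rows where just the
# gold standard (sklearn) is correct, TF rows where just the Exareme result is
# correct. The test passes when the gold standard is not significantly better
# (p > 0.05 in the one-sided binomial test below).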
p_value = stats.binom_test(x=FT, n=(TF+FT), p=0.5, alternative="greater")
if p_value > 0.05 :
print (p_value)
assert (1)
else:
print (p_value)
assert (0)
def test_Cart_2(self):
logging.info("---------- TEST 2: CART - regression tree with real variables - Training and prediction applied on the same dataset")
data_Cart = [{ "name": "x", "value": "Cement,BlastFurnaceSlag,FlyAsh,Water,Superplasticizer,CoarseAggregate,FineAggregate,Age"},
{"name": "y", "value": "ConcreteCompressiveStrength"},
{"name": "max_depth", "value": "200"},
{"name": "no_split_points","value": "20"},
{"name": "pathology","value":"dementia"},
{"name": "dataset", "value": "concrete_data_training"},
{"name": "filter", "value": ""}]
headers = {'Content-type': 'application/json', "Accept": "text/plain"}
r = requests.post(endpointUrl_CartTraining,data=json.dumps(data_Cart),headers=headers)
exaremeResult_Training = json.loads(r.text)
print ("exaremeResult_Training", r.text)
####################################################################
#Run Exareme Cart Prediction Algorithm
data_CartPredict = [{ "name": "x", "value":"Cement,BlastFurnaceSlag,FlyAsh,Water,Superplasticizer,CoarseAggregate,FineAggregate,Age,IdForTesting"},
{"name": "y", "value": "ConcreteCompressiveStrength"},
{"name": "treeJson", "value": ""},
{"name": "treeFile", "value": "tree.txt"},
{"name": "pathology","value":"dementia"},
{"name": "dataset", "value": "concrete_data_testing"},
{"name": "filter", "value": ""}]
#headers = {'Content-type': 'application/json', "Accept": "text/plain"}
#r = requests.post(endpointUrl_CartPredict,data=json.dumps(data_CartPredict),headers=headers)
#print ("ExaremeResult", r.text)
#exaremeResult_Predict = json.loads(r.text)
r = ExaremePredictSimulation(path+"concrete_data_testing.csv", data_CartPredict[0]['value'], data_CartPredict[1]['value'],exaremeResult_Training['result'][0]['data'], {})
exaremeResult_Predict = json.loads(r)
print (exaremeResult_Predict)
####################################################################
#Run Python
data_sklearn = {"trainingDataset": "concrete_data_training",
"testingDataset": "concrete_data_testing",
"x": "Cement,BlastFurnaceSlag,FlyAsh,Water,Superplasticizer,CoarseAggregate,FineAggregate,Age",
"y":"ConcreteCompressiveStrength",
"max_depth": 200}
sklearnResult = run_sklearn_regression(path+data_sklearn['trainingDataset']+".csv",
path+data_sklearn['testingDataset']+".csv",
data_sklearn['x'], data_sklearn['y'], data_sklearn['max_depth'])
#CompareResults
exaremeResult = pd.DataFrame.from_dict(exaremeResult_Predict["result"][1]["data"])
joinTbls = pd.merge(exaremeResult, sklearnResult, left_on = "ids", right_on = "IdForTesting")
mseMIP = []
mseGS = []
for ind in joinTbls.index:
mseMIP.append((joinTbls[data_sklearn['y']][ind] - joinTbls['predictions'][ind]) ** 2)
mseGS.append((joinTbls[data_sklearn['y']][ind] - joinTbls['goldStandardImplementationResult'][ind]) ** 2)
statistic, p_value = stats.wilcoxon(mseMIP,mseGS, zero_method = "wilcox" , correction =True, alternative = 'greater')
if p_value > 0.05 :
print (p_value)
assert (1)
else:
print (p_value)
assert (0)
| mit |
martinggww/lucasenlights | ETF/bin/3getBestTradeQuantSet.py | 1 | 3805 | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import my_config as config
import logging
from src.calReturn import getTradeQuantDict, calExtremeReturn
import pandas as pd
import json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
def compress(hold_sell_quant):
hold_quants = ','.join(['hold_' + quant for quant in hold_sell_quant['hold_quant']])
sell_quants = ','.join(['sell_' + quant for quant in hold_sell_quant['sell_quant']])
return hold_quants + ',' + sell_quants
def postProcess(fund_name, trade_quant_dict, short_trade_quant_dict):
'''Buy .....'''
out_trade_quant_dict = {}
for key, value in trade_quant_dict.iteritems():
#if key in short_trade_quant_dict:
# continue
#else:
if len(value['low_perc']) > 1:
value['low_perc'] = sum(value['low_perc'])/len(value['low_perc'])
value['high_perc'] = sum(value['high_perc']) / len(value['high_perc'])
else:
value['low_perc'] = value['low_perc'][0]
value['high_perc'] = value['high_perc'][0]
if value['low_perc'] < -0.08 or value['high_perc'] > 0.25:
print value['high_perc'], value['low_perc']
else:
out_trade_quant_dict[key] = value
trade_quant_file = '../' + fund_name + config.TRADE_QUANT
with open(trade_quant_file, 'w') as fp:
json.dump(out_trade_quant_dict, fp, sort_keys=True, indent=4, separators=(',', ': '))
print len(out_trade_quant_dict)
'''SELL .....'''
out_trade_quant_dict = {}
for key, value in short_trade_quant_dict.iteritems():
#if key in trade_quant_dict:
# continue
#else:
if len(value['low_perc']) > 1:
value['low_perc'] = sum(value['low_perc'])/len(value['low_perc'])
value['high_perc'] = sum(value['high_perc']) / len(value['high_perc'])
else:
value['low_perc'] = value['low_perc'][0]
value['high_perc'] = value['high_perc'][0]
if value['low_perc'] < -0.08 or value['high_perc'] > 0.25:
print value['high_perc'], value['low_perc']
else:
out_trade_quant_dict[key] = value
short_trade_quant_file = '../' + fund_name + config.SHORT_TRADE_QUANT
with open(short_trade_quant_file, 'w') as fp:
json.dump(out_trade_quant_dict, fp, sort_keys=True, indent=4, separators=(',', ': '))
print len(out_trade_quant_dict)
'''
Read price df, read statics data, for each record, calculate it's quantitative code
'''
if __name__ == '__main__':
if sys.argv[1] == 'usage':
print "Usage: getBestTradeQuantSet, this program will calculate the best quantitiave codes of trading data"
print "usage: python 3getBestTradeQuantSet.py <fund_name>"
print "eg: python 3getBestTradeQuantSet.py lucas"
exit(1)
fund_name = sys.argv[1]
data_frame = 'daily'
TRADE_DF_PICKLE = '../'+fund_name+ config.TRADE_DF_PICKLE
QUANT_DF_PICKLE = '../'+fund_name+ config.QUANT_DF_PICKLE
df = pd.read_pickle(TRADE_DF_PICKLE)
quant_df = pd.read_pickle(QUANT_DF_PICKLE)
print df.shape
start_index = 0
end_index = df.shape[0]
n = config.MAX_HOLD_DAYS
stickers = config.FUND_STICKERS[fund_name]
long_trades, short_trades= calExtremeReturn(stickers, df, start_index, end_index, n)
trade_quant_dict = getTradeQuantDict(quant_df, long_trades)
short_trade_quant_dict = getTradeQuantDict(quant_df, short_trades)
print "Before merging: long quotes: " + str(len(trade_quant_dict))
print "shorte quotes: " + str(len(short_trade_quant_dict))
#post process
print "After merging:"
postProcess(fund_name, trade_quant_dict, short_trade_quant_dict)
| cc0-1.0 |
mwil/collision | figs/ser_contour/plot_ser_contour.py | 1 | 3683 | #!/usr/bin/env python2.7
# Copyright 2013-2014 Matthias Wilhelm
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
######################################################
############### PLOT OPTIONS ###################
######################################################
Au = np.sqrt(1e4)
mode = ("sync", "usync")[1]
content = ("same", "unif")[1]
decision = ("soft", "hard")[0]
######################################################
######################################################
mpl.rc_file("../rc/3fig-contour-rc.txt")
def plot():
data = np.load("data/ser_Au%.2f_%s_%s_v2.npz"%(Au, content, decision))
Zs = data["SER_S"]
Zu = data["SER_U"]
Z = (Zu if mode in ("usync",) else Zs)
tau_range = data["tau_range"]
phi_range = data["phi_range"]
PHI, TAU = np.meshgrid(phi_range, tau_range)
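# With the default 'xy' indexing, np.meshgrid(phi_range, tau_range) returns arrays of
# shape (len(tau_range), len(phi_range)), so PHI and TAU line up with Z computed on the same grid.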
print("DEBUG: If 'Inputs x and y must be 1D or 2D.' -> Shape mismatch PHI, TAU, Z: ", PHI.shape, TAU.shape, Z.shape)
CSf = plt.contourf(TAU, PHI/np.pi, Z, levels=(0.0, 1e-3, 0.25, 0.9, 1.0), colors=("0.0", "0.5", "0.75", "1."), origin="lower")
CS2 = plt.contour(CSf, levels=(0.9, 0.25, 1e-3), colors=("1.0", "r", "w"), linewidths=(0., 0.75, 1.0), origin="lower", hold="on")
plt.axis([-1.5, 1.5, -1, 1])
plt.xlabel(r"Time offset $\tau$ ($/T$)", labelpad=2)
plt.ylabel(r"Carrier phase offset $\varphi_c$ ($/\pi$)", labelpad=0)
plt.annotate(r"capture zone", xy=(-0.35, 0.125), xytext=(-1.425, 0.475), color="0.0",
fontproperties=FontProperties(size=10, style="italic"),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2", color="0.55", lw=1.25))
plt.annotate("", xy=(-0.35, 0.85), xytext=(-0.8, 0.525),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2", lw=1.25, color="0.55"))
plt.annotate("", xy=(-0.35, -0.85), xytext=(-0.9, 0.45),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2", lw=1.25, color="0.55"))
plt.savefig("pdf/serc_Au%.2f_%s_%s_%s_v2.pdf"%(Au, mode, content, decision))
#plt.savefig("png/serc2_Au%.2f_%s_%s_%s.png"%(Au, mode, content, decision), dpi=600)
def colorbar_only():
fig = plt.figure(figsize=(0.375, 1.92))
ax1 = fig.add_axes([0, 0.05, 0.25, 1])
cmap = mpl.colors.ListedColormap(["0.0", "0.5", "0.75", "1.0"])
bounds = [0.0, 1e-3, 0.25, 0.9, 1.0]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb2 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
boundaries=bounds,
ticks=bounds, # optional
orientation="vertical")
cb2.set_ticklabels(["0", "$10^{-3}$", "0.25", "0.9", "1"])
cb2.set_label(r"Symbol error rate ($\mathrm{SER}$)", fontsize=12, labelpad=6)
plt.savefig("pdf/cb.pdf")
######################################################
if __name__ == "__main__":
plot()
#colorbar_only()
| gpl-3.0 |
cl4rke/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
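# ravel() flattens the label-indicator matrix and the score matrix so that every
# (sample, class) pair is treated as one binary decision; this is the
# micro-averaging described in the docstring above.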
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]))
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
jseabold/scikit-learn | examples/svm/plot_svm_anova.py | 12 | 2017 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
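# Pipeline parameters are addressed as <step_name>__<param_name>, which is why the
# loop below can tune the percentile via set_params(anova__percentile=...).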
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
mlyundin/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
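# The standard deviation across the per-tree importances is the inter-tree
# variability drawn as error bars (yerr) in the plot below.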
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
drusk/pml | test/matchers/general_matchers.py | 1 | 2013 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Custom Hamcrest matchers for general purposes not related to pml or pandas
data structures.
@author: drusk
"""
from hamcrest.core.base_matcher import BaseMatcher
class InRange(BaseMatcher):
"""
Matches values within a specified range (inclusive).
"""
def __init__(self, minval, maxval):
"""
Creates a new matcher given the expected minimum and maximum values.
"""
self.minval = minval
self.maxval = maxval
def _matches(self, val):
return val <= self.maxval and val >= self.minval
def describe_to(self, description):
description.append_text("value between %s and %s"
%(self.minval, self.maxval))
def in_range(minval, maxval):
"""
Checks if a value is within the range specified by minval and maxval,
inclusive.
"""
return InRange(minval, maxval)
| mit |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/dicom/create_dicom_test.py | 12 | 3719 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests import_dicom functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import numpy
import dicom
from lxml import etree
class CreateDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(CreateDicomTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = self.get_local_dataset("dicom_xml/")
self.image_directory = self.get_local_dataset("dicom_uncompressed/")
def test_metadata_content_import_dcm_basic(self):
"""content test of dicom metadata import"""
# here we will get the files so we can generate the expected result
files = []
for filename in os.listdir(self.xml_directory):
if filename.endswith(".xml"):
with open(self.xml_directory + str(filename), 'rb') as xmlfile:
contents = xmlfile.read()
xml = etree.fromstring(contents)
bulk_data = xml.xpath("//BulkData")[0]
bulk_data.getparent().remove(bulk_data)
files.append(etree.tostring(xml))
# the BulkData location element of the metadata xml will be different
# since the dicom may load the data from a different location than
# where we loaded our files. We will remove this element from the metadata
# before we compare
metadata_pandas = self.dicom.metadata.to_pandas()
for dcm_file in metadata_pandas["metadata"]:
dcm_file = dcm_file.encode("ascii", "ignore")
dcm_xml_root = etree.fromstring(dcm_file)
dcm_bulk_data = dcm_xml_root.xpath("//BulkData")[0]
dcm_bulk_data.getparent().remove(dcm_bulk_data)
self.assertTrue(etree.tostring(dcm_xml_root) in files)
def test_image_content_import_dcm_basic(self):
"""content test of image data for dicom"""
# load the files so we can compare with the dicom result
files = []
for filename in os.listdir(self.image_directory):
pixel_data = dicom.read_file(self.image_directory + filename).pixel_array
files.append(pixel_data)
# iterate through the data in the files and in the dicom frame
# and ensure that they match
image_pandas = self.dicom.pixeldata.to_pandas()["imagematrix"]
for dcm_image in image_pandas:
result = any(numpy.array_equal(dcm_image, file_image) for file_image in files)
self.assertTrue(result)
def test_import_dicom_invalid_files(self):
"""tests import dicom with invalid data"""
dataset = self.get_file("int_str_int.csv")
with self.assertRaisesRegexp(Exception, "Not a DICOM Stream"):
dicom = self.context.dicom.import_dcm(dataset)
dicom.metadata.count()
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
rafael-radkowski/ME325 | MohrsCircle2D.py | 1 | 22916 |
"""Failure theories for ductile materials - principal stress example
# -*- coding: utf-8 -*-
This module provides an example explaining the failure theories for ductile materials
in a plane. The user can set the principal stresses and the yield stress for a material.
The class shows a plot with the two envelopes and the principal stresses:
Implemented theories are:
- Maximum shear stress theory or Tresca theory
- Distortion energy theory or von Mises theory.
User input:
- Yield strength
- Principal stresses
Press 'e' for manual input.
Example:
root = Tk()
root.geometry("800x600+300+300")
app = MohrsCircle2D()
root.mainloop()
or from a console.
$ python MohrsCircle2D.py
Note that this script was developed and tested with Python 3.5.3
Attributes:
-
Todo:
*
Rafael Radkowski
Iowa State University
Dec. 28, 2018
rafael@iastate.edu
All copyright reserved.
"""
# matplotlib
import platform
import matplotlib
if platform.system() == 'Darwin':
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# tkinter for the display
from tkinter import *
from tkinter import Canvas, messagebox
from tkinter import Tk, BOTH, W, N, E, S
from tkinter.ttk import Frame, Button, Label, Scale, Checkbutton, Combobox
import tkinter as tk
# for images
from PIL import Image, ImageTk
# failure theory implementation
from ME325Common.FailureTheories import *
from ME325Common.PlotHelpers import *
from ME325Common.ContinuumMechanics import *
from ME325Common.UnitConversion import *
class MohrsCircle2D(Frame):
"""
This class implements examples for two failure theories for ductile materials.
See documentation
"""
# limits
limit_stress = 1000
# default values
default_stress = 50
# canvas
__canvas = 0
# Tresca and Mises plot
__the_plot = None
__show_helpers = True
slider_length = 220
# dicts
__labels = {}
__sliders = {}
__output = {}
__results = {}
# variables
__var = {}
# checkbox
__checkbox_helpers = 0
__combo = 0
combobox_str = ["SI", "USCS"]
__unit_str = "(N/mm^2)"
#submenu
menu = None
entry_items = ["\u03C3_x", "\u03C3_y", "\u03C4_xy"]
def __init__(self):
super().__init__()
# the plot
self.__the_plot = MohrsCirclePlot()
# init ui
self.initUI()
# menu
self.menu = MohrsCircle2DDetails(self.master, self.manual_entry_callback)
# update ui
self.update_values(0.0)
# ----------- Update the outputs ----------
def update_plot(self):
"""
Update the plot area
:return:
"""
sx = self.__var["sx"].get()
sy = self.__var["sy"].get()
txy = self.__var["txy"].get()
s1 = self.__results["s1"]
s2 = self.__results["s2"]
a1 = self.__results["a1"]
a2 = self.__results["a2"]
t1 = self.__results["t1"]
t2 = self.__results["t2"]
# Update the plot
self.__the_plot.update_plot(sx, sy, txy)
self.__the_plot.update_helpers(s1, s2, a1, a2, t1, t2, self.__show_helpers)
self.__canvas.draw_idle()
self.menu.update_plot(s1, s2, a1)
def update_output_display(self):
"""
Update the output display: the principal stresses, maximum shear stress,
and principal angles this panel shows.
:return:
"""
self.__var["s1"].set(str(round(self.__results["s1"],2)))
self.__var["s2"].set(str(round(self.__results["s2"],2)))
self.__var["tmax"].set(str(round(abs(self.__results["t1"]), 2)))
self.__var["a1"].set(str(round(self.__results["a1"], 2)))
self.__var["a2"].set(str(round(self.__results["a2"], 2)))
return
# ---------Widget callbacks ---------------
def update_values(self, val):
"""
Update function for all widgets. The function updates the
input values. Note that all slider widgets call this function.
:param val: The value the widget passes
:return: -
"""
self.__var["sx"].set( round(self.__sliders["sx"].get(),2))
self.__var["sx_str"].set(str(self.__var["sx"].get()))
self.__var["sy"].set(round(self.__sliders["sy"].get(), 2))
self.__var["sy_str"].set(str(self.__var["sy"].get()))
self.__var["txy"].set(round(self.__sliders["txy"].get(), 2))
self.__var["txy_str"].set(str(self.__var["txy"].get()))
sx = self.__var["sx"].get()
sy = self.__var["sy"].get()
txy = self.__var["txy"].get()
# Calculate new results
s1, s2 = calcPrincipalStress(sx, sy, txy)
a1, a2 = calcPrincipalAngles(sx, sy, txy)
t1, t2 = calcMaxShearStress(sx, sy, txy)
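# For reference (assumed behaviour of the ME325Common helpers): with
# s_avg = (sx + sy)/2 and R = sqrt(((sx - sy)/2)**2 + txy**2), Mohr's circle
# gives the principal stresses s1,2 = s_avg +/- R, the maximum shear stress
# t_max = R, and the principal angle from tan(2*theta_p) = 2*txy/(sx - sy).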
self.__results["s1"] = s1
self.__results["s2"] = s2
if sx >= sy:
self.__results["a1"] = a1
self.__results["a2"] = a2
else:
self.__results["a1"] = a2
self.__results["a2"] = a1
self.__results["t1"] = t1
self.__results["t2"] = t2
self.update_plot()
self.update_output_display()
def cb_update(self):
"""
Checkbox update. Captures the checkbox clicks.
Checkboxes do not pass any arguments to the function
:return:
"""
#self.failure_theory_plts.showVonMisesPlt(int(self.cb_mises.get()))
#self.failure_theory_plts.showTrescaPlt(int(self.cb_tresca.get()))
if self.__checkbox_helpers.get() == 1:
self.__show_helpers = True
else:
self.__show_helpers = False
self.update_values(0)
def key_callback(self, event):
"""
Create a subwindow to allow for user input
:param event:
:return:
"""
if event.char == 'e':
self.create_subUI()
elif event.char == 'd':
return
def manual_entry_callback(self):
"""
Apply the values that the user set in the sub window
:return:
"""
try:
d = self.menu.get()
self.__sliders["sx"].set(float(d[self.entry_items[0]].get()))
self.__sliders["sy"].set(float(d[self.entry_items[1]].get()))
self.__sliders["txy"].set(float(d[self.entry_items[2]].get()))
# get values
self.update_values(0)
except ValueError:
print("Something went wrong - invalid numbers")
except KeyError:
print("Something went wrong - wrong key")
def __combobox_callback(self, event):
c = self.__combo.get()
self.__change_unit(c)
self.update()
def __save_plot(self):
try:
p = self.__the_plot.save_plot()
messagebox.showinfo("Save Plot", str("Saved the plot as: " + p))
except:
pass
try:
self.menu.save_plot()
except:
pass
def __change_unit(self, unit):
if unit == self.combobox_str[0]: # change to si units
self.__unit_str = "(N/mm^2)"
self.__the_plot.set_unit(0)
self.__sliders["sx"].configure(from_=-self.limit_stress, to=self.limit_stress)
self.__sliders["sy"].configure(from_=-self.limit_stress, to=self.limit_stress)
self.__sliders["txy"].configure(from_=-self.limit_stress, to=self.limit_stress)
self.__sliders["sx"].set(UnitConversion.psi_to_Nmm2(self.__var["sx"].get()) * 1000)
self.__sliders["sy"].set(UnitConversion.psi_to_Nmm2(self.__var["sy"].get()) * 1000)
self.__sliders["txy"].set(UnitConversion.psi_to_Nmm2(self.__var["txy"].get()) * 1000)
#d = {self.entry_items[0]: UnitConversion.psi_to_Nmm2(self.__var["sx"].get()) * 1000,
# self.entry_items[1]: UnitConversion.psi_to_Nmm2(self.__var["sy"].get()) * 1000,
# self.entry_items[2]: UnitConversion.psi_to_Nmm2(self.__var["txy"].get()) * 1000}
#self.menu.set(d)
else: # change to uscs units
self.__unit_str = "(ksi)"
self.__the_plot.set_unit(1)
self.__sliders["sx"].configure(from_=-UnitConversion.Nmm2_to_psi(self.limit_stress / 1000),
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["sx"].set(UnitConversion.Nmm2_to_psi(self.__var["sx"].get()) / 1000)
self.__sliders["sy"].configure(from_=-UnitConversion.Nmm2_to_psi(self.limit_stress / 1000),
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["sy"].set(UnitConversion.Nmm2_to_psi(self.__var["sy"].get()) / 1000)
self.__sliders["txy"].configure(from_=-UnitConversion.Nmm2_to_psi(self.limit_stress / 1000),
to=UnitConversion.Nmm2_to_psi(self.limit_stress / 1000))
self.__sliders["txy"].set(UnitConversion.Nmm2_to_psi(self.__var["txy"].get()) / 1000)
#d = {self.entry_items[0]: UnitConversion.Nmm2_to_psi(self.__var["sx"].get()) / 1000,
# self.entry_items[1]: UnitConversion.Nmm2_to_psi(self.__var["sy"].get()) / 1000,
# self.entry_items[2]: UnitConversion.Nmm2_to_psi(self.__var["txy"].get()) / 1000}
#self.menu.set(d)
self.__labels["sx"].configure(text=str("\u03C3_x " + self.__unit_str + ":"))
self.__labels["sy"].configure(text=str("\u03C3_y " + self.__unit_str + ":"))
self.__labels["txy"].configure(text=str("\u03C4_xy " + self.__unit_str + ":"))
self.__labels["s1"].configure(text=str("\u03C3_1 " + self.__unit_str + ":"))
self.__labels["s2"].configure(text=str("\u03C3_3 " + self.__unit_str + ":"))
self.__labels["tmax"].configure(text=str("\u03C4_max " + self.__unit_str + ":"))
self.__canvas.draw_idle()
# ------------ Inits ---------------
def create_subUI(self):
"""
Create a window that allows a user to manually enter all the values
instead of using sliders
:return:
"""
try:
display_items = []
self.menu.create_menu("Enter data", self.entry_items, None)
d = {self.entry_items[0]: self.__var["sx"].get(),
self.entry_items[1]: self.__var["sy"].get(),
self.entry_items[2]: self.__var["txy"].get()}
self.menu.set(d)
s1 = self.__results["s1"]
s2 = self.__results["s2"]
a1 = self.__results["a1"]
self.menu.update_plot(s1, s2, a1)
except ValueError:
print("Something went wrong")
def create_plot(self):
"""
Create the plot that shows Mohr's circle
:return:
"""
fig = self.__the_plot.create_plot(8) # 8 -> figure size
self.__canvas = FigureCanvasTkAgg(fig, master=self)
self.__canvas.draw()
def initUI(self):
"""
Init the user interface and all widgets
:return: -
"""
rows_for_plot = 24
cols_for_plot = 5
output_row_start = 14
self.master.title("ME 325 Machine Component Design")
self.pack(fill=BOTH, expand=True)
# keyboard binding
self.master.bind("e", self.key_callback)
self.master.bind("d", self.key_callback)
self.columnconfigure(0, weight=1) # first and last column can expand
self.columnconfigure(0, pad=7)
self.rowconfigure(rows_for_plot, weight=1)
self.rowconfigure(rows_for_plot, pad=7)
lbl = Label(self, text="Mohr's Circle for plane materials")
lbl.grid(sticky=W, pady=4, padx=5)
self.__canvas = Canvas(self, width=300, height=300)
self.create_plot()
self.__canvas.get_tk_widget().grid(row=1, column=0, columnspan=cols_for_plot, rowspan=rows_for_plot,
padx=5, sticky=E + W + S + N)
Label(self, text="Input:",font='Helvetica 14 bold').grid(sticky=NW, row=1, column=cols_for_plot+1)
#-----------
# sx
self.__labels["sx"] = Label(self, text=str("\u03C3_x "+self.__unit_str + ":"), width=13)
self.__labels["sx"].grid(sticky=W, row=2, column=cols_for_plot+1)
self.__var["sx_str"] = StringVar()
self.__var["sx_str"].set(str(self.default_stress))
self.__var["sx"] = DoubleVar()
self.__var["sx"].set(self.default_stress)
self.__output["sx"] = Label(self, textvariable=self.__var["sx_str"], width=15)
self.__output["sx"].grid(sticky=W, row=2, column=cols_for_plot+2)
self.__sliders["sx"] = Scale(self, value=self.default_stress, from_=-self.limit_stress,
to=self.limit_stress,orient=HORIZONTAL,
length=self.slider_length, command=self.update_values)
self.__sliders["sx"].grid(sticky=W, row=3, column=cols_for_plot+1, columnspan=2)
##---------------------------------------------------------------------------------
# sy
self.__labels["sy"] = Label(self, text=str("\u03C3_y "+self.__unit_str + ":"))
self.__labels["sy"].grid(sticky=W, row=4, column=cols_for_plot + 1)
self.__var["sy_str"] = StringVar()
self.__var["sy_str"].set(str("-" + str(self.default_stress)))
self.__var["sy"] = DoubleVar()
self.__var["sy"].set(-self.default_stress)
self.__output["sy"] = Label(self, textvariable=self.__var["sy_str"])
self.__output["sy"].grid(sticky=W, row=4, column=cols_for_plot + 2)
self.__sliders["sy"] = Scale(self, value=-self.default_stress, from_=-self.limit_stress,
to=self.limit_stress, orient=HORIZONTAL,
length=self.slider_length, command=self.update_values)
self.__sliders["sy"].grid(sticky=W, row=5, column=cols_for_plot + 1, columnspan=2)
##---------------------------------------------------------------------------------
# t_xy
self.__labels["txy"] = Label(self, text=str("\u03C4_xy "+self.__unit_str + ":"))
self.__labels["txy"].grid(sticky=W, row=6, column=cols_for_plot + 1)
self.__var["txy_str"] = StringVar()
self.__var["txy_str"].set(str(self.default_stress/2))
self.__var["txy"] = DoubleVar()
self.__var["txy"].set(self.default_stress/2)
self.__output["txy"] = Label(self, textvariable=self.__var["txy_str"])
self.__output["txy"].grid(sticky=W, row=6, column=cols_for_plot + 2)
self.__sliders["txy"] = Scale(self, value=self.default_stress/2, from_=-self.limit_stress,
to=self.limit_stress, orient=HORIZONTAL,
length=self.slider_length, command=self.update_values)
self.__sliders["txy"].grid(sticky=W, row=7, column=cols_for_plot + 1, columnspan=2)
##---------------------------------------------------------------------------------
# Output
Label(self, text="Output:",font='Helvetica 14 bold').grid(sticky=NW, row=output_row_start, column=cols_for_plot+1)
# s1
self.__labels["s1"] = Label(self, text=str("\u03C3_1 "+self.__unit_str + ":"))
self.__labels["s1"].grid(sticky=W, row=output_row_start+1, column=cols_for_plot + 1)
self.__var["s1"] = StringVar()
self.__var["s1"].set("0")
self.__output["s1"] = Label(self, textvariable=self.__var["s1"])
self.__output["s1"].grid(sticky=W, row=output_row_start + 1, column=cols_for_plot + 2)
# s2
self.__labels["s2"] = Label(self, text=str("\u03C3_3 "+self.__unit_str + ":"))
self.__labels["s2"].grid(sticky=W, row=output_row_start + 2, column=cols_for_plot + 1)
self.__var["s2"] = StringVar()
self.__var["s2"].set("0")
self.__output["s2"] = Label(self, textvariable=self.__var["s2"])
self.__output["s2"].grid(sticky=W, row=output_row_start + 2, column=cols_for_plot + 2)
# angle 1
self.__labels["a1"] = Label(self, text="\u03B8_x (deg):")
self.__labels["a1"].grid(sticky=W, row=output_row_start + 3, column=cols_for_plot + 1)
self.__var["a1"] = StringVar()
self.__var["a1"].set("0")
self.__output["a1"] = Label(self, textvariable=self.__var["a1"])
self.__output["a1"].grid(sticky=W, row=output_row_start + 3, column=cols_for_plot + 2)
# angle 2
self.__labels["a2"] = Label(self, text="\u03B8_y (deg):")
self.__labels["a2"].grid(sticky=W, row=output_row_start + 4, column=cols_for_plot + 1)
self.__var["a2"] = StringVar()
self.__var["a2"].set("0")
self.__output["a2"] = Label(self, textvariable=self.__var["a2"])
self.__output["a2"].grid(sticky=W, row=output_row_start + 4, column=cols_for_plot + 2)
# tau max
self.__labels["tmax"] = Label(self, text=str("\u03C4_max "+self.__unit_str + ":"))
self.__labels["tmax"].grid(sticky=W, row=output_row_start + 5, column=cols_for_plot + 1)
self.__var["tmax"] = StringVar()
self.__var["tmax"].set("0")
self.__output["tmax"] = Label(self, textvariable=self.__var["tmax"])
self.__output["tmax"].grid(sticky=W, row=output_row_start + 5, column=cols_for_plot + 2)
#Label(self, text="press 'd' for details ",font='Helvetica 12').grid(sticky=NW, row=output_row_start + 6,
# column=cols_for_plot+1, columnspan=2)
##---------------------------------------------------------------------------------
# Others
cbtn = Button(self, text="Exit", command=self.quit)
cbtn.grid(row=rows_for_plot+1, column=cols_for_plot+2, pady=4, sticky=E)
Button(self, text="Save", command=self.__save_plot).grid(row=rows_for_plot + 1, column=cols_for_plot+1,
pady=4, sticky=W)
self.__checkbox_helpers = IntVar()
self.__checkbox_helpers.set(1)
check1 = Checkbutton(self, text="Helpers", variable=self.__checkbox_helpers, command=self.cb_update)
check1.grid(row=rows_for_plot+1, column=cols_for_plot-1, sticky=W)
Label(self, text="press 'e' for manual input.", font='Helvetica 12').grid(row=rows_for_plot+1, column=0, sticky=W, padx=7)
self.__combo = Combobox(self, values=self.combobox_str, state='readonly', width=6)
self.__combo.grid(sticky=W, row=rows_for_plot+1, column=cols_for_plot-4, columnspan=1, padx=1, pady=0)
self.__combo.current(0)
self.__combo.bind("<<ComboboxSelected>>", self.__combobox_callback)
class MohrsCircle2DDetails():
__toplevel = 0 # Toplevel window
__event_callback = 0 # main window callback
__window = None # This window
__dict = {} #dict with output data
__entry_dict = {} # dict to store reference to all Entry widgets
__plot = 0
__canvas = 0
def __init__(self, toplevel, event_callback):
self.__toplevel = toplevel
self.__event_callback = event_callback
def create_menu(self, title, entry_items, display_items ):
"""
:param entry_items: list with data to enter
:param display_items: list with data to display
:return:
"""
cols_for_plot = 4
rows_for_plot = 4
self.__window = Toplevel(self.__toplevel )
tk.Label(self.__window, text=title, background='white', font="Helvetica 14 bold").grid(sticky=NW, row=0,
column=0)
n = len(entry_items)
for i in range(n):
self.__dict[entry_items[i]] = StringVar()
tk.Label(self.__window, text=entry_items[i], background='white').grid(sticky=NW, row=i + 1, column=0)
e = Entry(self.__window, textvariable=self.__dict[entry_items[i]], width=15)
e.grid(sticky=NW, row=i + 1, column=1)
self.__entry_dict[entry_items[i]] = e
tk.Button(self.__window, text="Use", command=self.__event_callback,
background='white').grid(sticky=NE, row=n + 1, column=1, padx=7, pady=7)
tk.Label(self.__window, text="Stress tensor", background='white', font="Helvetica 14 bold").grid(sticky=NW, row=n+2,
column=0)
## Create the plot
plot_start_row = n+3
self.__plot = CauchyStressPlanePlot()
fig = self.__plot.create_plot(4)
self.__canvas = FigureCanvasTkAgg(fig, master=self.__window )
self.__canvas.get_tk_widget().grid(row=plot_start_row, column=0, columnspan=cols_for_plot, rowspan=rows_for_plot,
padx=5, sticky=E + W + S + N)
self.__canvas.draw()
self.__window.columnconfigure(cols_for_plot-1, weight=1) # first and last column can expand
self.__window.columnconfigure(cols_for_plot-1, pad=7)
self.__window.rowconfigure(plot_start_row, weight=1)
self.__window.rowconfigure(plot_start_row, pad=7)
tk.Button(self.__window, text="Close", command=self.__destroyed_callback,
background='white').grid(sticky=NW, row=plot_start_row+ rows_for_plot + 2, column=0, padx=7, pady=7)
return
def get(self):
return self.__dict
def set(self, items_dict):
try:
keys = items_dict.keys()
n = len(keys)
for key, value in items_dict.items():
self.__dict[key].set(str(value))
except ValueError:
print("Something went wrong - invalid values")
except KeyError:
return
def set_readonly(self, readonly_key):
for key, value in self.__entry_dict.items():
if key == readonly_key:
value.configure(state='disabled')
else:
value.configure(state='normal')
def __destroyed_callback(self):
self.__window.destroy()
self.__window = None
def save_plot(self):
self.__plot.save_plot()
def update_plot(self, s1, s2, a1):
try:
self.__plot.update_plot(s1, s2, a1)
self.__canvas.draw_idle()
except AttributeError:
return
def main():
root = Tk()
root.geometry("900x720+300+300")
app = MohrsCircle2D()
# To jump the window to the front
root.attributes("-topmost", True)
root.after_idle(root.attributes, '-topmost', False)
# run
root.mainloop()
if __name__ == '__main__':
main() | mit |
3nrique0/Tools_for_Bioinformatics | best_blast_hit/bbh.py | 1 | 1579 | #! /usr/bin/python3.5
import pandas as pd
import argparse
dfTest = pd.DataFrame({'id' : [1,1,1,2,2,3,3,3,3,4,4,5,6,6,6,7,7],
'value' : ["first","second","second","first",
"second","first","third","fourth",
"fifth","second","fifth","first",
"first","second","third","fourth","fifth"]})
#### ARGUMENTS HAVE TO BE RE-WORKED ON
# def __main__():
# parser = argparse.ArgumentParser(description='''
# Get only best blast hit (BBH) output from blast results''')
#
# parser.add_argument("-f", "--file output", dest="blastHandle",
# type=str, default=None,
# help='''Blast output file (blast outfmt 6 only)\n''')
#
#
# args=parser.parse_args()
# df = pd.read_csv(args.blastHandle, sep="\t")
#
#
# print(df)
#
# if __name__ == "__main__": __main__()
## VARIABLES:
inputFile = "batch_1_uniq_blastx_eval1_outfmt6.out"
evalueMax = 1e-4
blastHeader = ['queryId', 'subjectId', 'identity', 'alignmentLength', 'mismatches', 'gapOpens', 'qStart', 'qEnd', 'sStart', 'sEnd', 'evalue', 'bitScore']
## LOAD BLAST OUTPUT FILE
with open(inputFile,'r') as blastHandle:
df = pd.read_csv(blastHandle, sep="\t", names=blastHeader)
## TAKE ONLY THE FIRST LINE FOR EACH QUERY ID == TAKE ONLY BEST BLAST HIT
df2 = df.groupby('queryId').first()
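# Note: taking the first row per query assumes the BLAST tabular (outfmt 6) output
# lists hits for each query in ranked order, best hit first, which is the default
# BLAST behaviour.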
with open(inputFile+'.bbh', 'w') as outFile:
df2.to_csv(outFile, sep='\t')
print('Ta Daaaaa')
## COMMAND TO FILTER ON A COLUMN WITH SOME GIVEN PARAMETERS
#df3 = df2[df2['evalue'] < evalueMax ]
## ALWAYS LOL ON THE BRIGHT SIDE OF LIFE ^_^
#subjectIdUniq = list(set(sorted(df3['subjectId'])))
| gpl-3.0 |
Ydmir/PyOL | calc_ol.py | 1 | 5272 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Calculates oceanloading displacement.
calc_ol takes a site, a start date, an end date, and a time interval and calculates
oceanloading displacements at the site.
Usage:
calc_ol [-h] [-m MODEL] [-f] [-p] [-v] <site> <start_date> <end_date> <interval>
Arguments:
<site>: name of the station.
<start_date>: start date in ISO format: 2001-01-01T12:00:00
<end_date>: end date in ISO format.
<interval>: interval between points in minutes.
Optional arguments:
-h, --help Shows help message and exits
-f, --file If true then *.txt file with data is created
-p, --plot If true then plot is displayed
-v, --verbose Increase output verbosity
-m MODEL, --model MODEL Name of the ocean model to use (default: GOT00.2)
"""
import argparse
import logging
import pyol
import datetime
import matplotlib.pyplot as plt
from matplotlib import style
import dateutil.parser
import pkg_resources
from typing import List
def write_ol_to_file(site: str, date_list: List[datetime.datetime], oldata: List[float], oceanModel: str, filename: str):
"""
Saves the ocean loading time series to a file
Args:
site: Name of the station
date_list: datetimes for the oceanloading calculations
oldata: calculated displacements
oceanModel: Name of the used ocean model
file: Name of the file to which the time series are saved
"""
with open(filename, 'w') as f:
f.write('# Site : %s\n' % site)
f.write('# Ocean Model: %s\n' % oceanModel)
f.write('#DATE\t TIME\t\t dU [m]\t dW [m]\t dS[m]\n')
for i in range(len(date_list)):
f.write ('%s %9.6f %9.6f %9.6f\n' % (date_list[i].strftime('%Y-%m-%d %H:%M:%S'), oldata[i, 0], oldata[i, 1], oldata[i, 2]))
f.write('#END OF FILE')
def plot_ol(site: str, date_list: List[datetime.datetime], oldata: List[float], oceanModel: str):
"""
Plots ocean loading displacements
Args:
site: Name of the station
date_list: datetimes for the oceanloading calculations
oldata: calculated displacements
oceanModel: Name of the ocean model to use
"""
style.use(['classic', 'seaborn-whitegrid', 'seaborn-talk', 'seaborn-deep'])
fig, ax = plt.subplots()
ax.plot(date_list, oldata[:, 0] * 1000, label='Radial [mm]', linestyle='-')
ax.plot(date_list, oldata[:, 1] * 1000, label='West [mm]', linestyle='-')
ax.plot(date_list, oldata[:, 2] * 1000, label='South [mm]', linestyle='-')
plt.ylabel('Displacement [mm]')
plt.title(site.upper() + ", model: " + oceanModel)
ax.legend(loc='best')
fig.autofmt_xdate()
plt.show()
if __name__ == '__main__':
# Just some parsing to datetime formats, then calling the actual computation file.
parser = argparse.ArgumentParser(description='Compute displacements caused by ocean tides.')
parser.add_argument('site', type=str, help='Name of the station')
parser.add_argument('start_date', type=str,help='Start date in ISO format: 2017-01-01T12:00:00')
parser.add_argument('end_date', type=str, help='End date in ISO format : 2017-01-01T12:00:00')
parser.add_argument('interval', type=float,help='Interval between points in minutes')
parser.add_argument("-m","--model", type=str, required=False, default='GOT00.2', help='Name of the ocean loading model to use (default: GOT00.2)')
parser.add_argument("-f", "--file", required=False, action="store_true", help='If true then *.txt file with data is created')
parser.add_argument("-p", "--plot", required=False, action="store_true", help='If true then plot is displayed')
parser.add_argument("-v", "--verbose", required=False,action="store_true", help="Increase output verbosity" )
args = parser.parse_args()
calc_start_date = dateutil.parser.parse(args.start_date)
calc_end_date = dateutil.parser.parse(args.end_date)
calc_interval = datetime.timedelta(seconds=60*args.interval)
oModel=args.model
if args.verbose:
logging.basicConfig(level=logging.INFO)
logging.info('Using %s ocean model...' % args.model)
logging.info('Start date : %s' % calc_start_date.strftime('%Y-%m-%d %H:%M:%S'))
logging.info('End date : %s' % calc_end_date.strftime('%Y-%m-%d %H:%M:%S'))
logging.info('Interval [min]: %.2f' % args.interval)
path = pkg_resources.resource_filename(__name__, args.site + '_' + calc_start_date.strftime('%y%j') + '_' + calc_end_date.strftime('%y%j') +'_' + str(int(args.interval)) + '.txt')
# Compute displacements (N,E,U)
date_list = [calc_start_date + i*calc_interval for i in range(int((calc_end_date-calc_start_date)/calc_interval))]
py_data = pyol.calc_displacement(date_list, args.site, oModel)
print(args)
if args.file: # if True then store results into file
write_ol_to_file(site=args.site, date_list=date_list, oldata=py_data, oceanModel=oModel, filename=path)
logging.info('Data stored in %s ' % path)
if args.plot: # if True then display results
plot_ol(site=args.site, date_list=date_list, oldata=py_data, oceanModel=oModel)
| mit |
Akshay0724/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
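# Classic perceptron update rule: for every misclassified sample, move the
# weights toward it (w += y*x, b += y); correctly classified samples leave
# the model unchanged.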
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/tests/test_mathtext.py | 2 | 8630 | import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif
import matplotlib.pyplot as plt
math_tests = [
r'$a+b+\dots+\dot{s}+\ldots$',
r'$x \doteq y$',
r'\$100.00 $\alpha \_$',
r'$\frac{\$100.00}{y}$',
r'$x y$',
r'$x+y\ x=y\ x<y\ x:y\ x,y\ x@y$',
r'$100\%y\ x*y\ x/y x\$y$',
r'$x\leftarrow y\ x\forall y\ x-y$',
r'$x \sf x \bf x {\cal X} \rm x$',
r'$x\ x\,x\;x\quad x\qquad x\!x\hspace{ 0.5 }y$',
r'$\{ \rm braces \}$',
r'$\left[\left\lfloor\frac{5}{\frac{\left(3\right)}{4}} y\right)\right]$',
r'$\left(x\right)$',
r'$\sin(x)$',
r'$x_2$',
r'$x^2$',
r'$x^2_y$',
r'$x_y^2$',
r'$\prod_{i=\alpha_{i+1}}^\infty$',
r'$x = \frac{x+\frac{5}{2}}{\frac{y+3}{8}}$',
r'$dz/dt = \gamma x^2 + {\rm sin}(2\pi y+\phi)$',
r'Foo: $\alpha_{i+1}^j = {\rm sin}(2\pi f_j t_i) e^{-5 t_i/\tau}$',
r'$\mathcal{R}\prod_{i=\alpha_{i+1}}^\infty a_i \sin(2 \pi f x_i)$',
r'Variable $i$ is good',
r'$\Delta_i^j$',
r'$\Delta^j_{i+1}$',
r'$\ddot{o}\acute{e}\grave{e}\hat{O}\breve{\imath}\tilde{n}\vec{q}$',
r"$\arccos((x^i))$",
r"$\gamma = \frac{x=\frac{6}{8}}{y} \delta$",
r'$\limsup_{x\to\infty}$',
r'$\oint^\infty_0$',
r"$f'$",
r'$\frac{x_2888}{y}$',
r"$\sqrt[3]{\frac{X_2}{Y}}=5$",
r"$\sqrt[5]{\prod^\frac{x}{2\pi^2}_\infty}$",
r"$\sqrt[3]{x}=5$",
r'$\frac{X}{\frac{X}{Y}}$',
r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$",
r'$\mathcal{H} = \int d \tau \left(\epsilon E^2 + \mu H^2\right)$',
r'$\widehat{abc}\widetilde{def}$',
r'$\Gamma \Delta \Theta \Lambda \Xi \Pi \Sigma \Upsilon \Phi \Psi \Omega$',
r'$\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota \lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon \phi \chi \psi$',
# r'$\operatorname{cos} x$',
# The examples prefixed by 'mmltt' are from the MathML torture test here:
# http://www.mozilla.org/projects/mathml/demo/texvsmml.xhtml
r'${x}^{2}{y}^{2}$',
r'${}_{2}F_{3}$',
r'$\frac{x+{y}^{2}}{k+1}$',
r'$x+{y}^{\frac{2}{k+1}}$',
r'$\frac{a}{b/2}$',
r'${a}_{0}+\frac{1}{{a}_{1}+\frac{1}{{a}_{2}+\frac{1}{{a}_{3}+\frac{1}{{a}_{4}}}}}$',
r'${a}_{0}+\frac{1}{{a}_{1}+\frac{1}{{a}_{2}+\frac{1}{{a}_{3}+\frac{1}{{a}_{4}}}}}$',
r'$\binom{n}{k/2}$',
r'$\binom{p}{2}{x}^{2}{y}^{p-2}-\frac{1}{1-x}\frac{1}{1-{x}^{2}}$',
# 'mmltt10' : r'$\sum _{\genfrac{}{}{0}{}{0\leq i\leq m}{0<j<n}}P\left(i,j\right)$',
r'${x}^{2y}$',
r'$\sum _{i=1}^{p}\sum _{j=1}^{q}\sum _{k=1}^{r}{a}_{ij}{b}_{jk}{c}_{ki}$',
r'$\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+\sqrt{1+x}}}}}}}$',
r'$\left(\frac{{\partial }^{2}}{\partial {x}^{2}}+\frac{{\partial }^{2}}{\partial {y}^{2}}\right){|\varphi \left(x+iy\right)|}^{2}=0$',
r'${2}^{{2}^{{2}^{x}}}$',
r'${\int }_{1}^{x}\frac{\mathrm{dt}}{t}$',
r'$\int {\int }_{D}\mathrm{dx} \mathrm{dy}$',
# mathtex doesn't support array
# 'mmltt18' : r'$f\left(x\right)=\left\{\begin{array}{cc}\hfill 1/3\hfill & \text{if_}0\le x\le 1;\hfill \\ \hfill 2/3\hfill & \hfill \text{if_}3\le x\le 4;\hfill \\ \hfill 0\hfill & \text{elsewhere.}\hfill \end{array}$',
# mathtex doesn't support stackrel
# 'mmltt19' : ur'$\stackrel{\stackrel{k\text{times}}{\ufe37}}{x+...+x}$',
r'${y}_{{x}^{2}}$',
# mathtex doesn't support the "\text" command
# 'mmltt21' : r'$\sum _{p\text{\prime}}f\left(p\right)={\int }_{t>1}f\left(t\right) d\pi \left(t\right)$',
# mathtex doesn't support array
# 'mmltt23' : r'$\left(\begin{array}{cc}\hfill \left(\begin{array}{cc}\hfill a\hfill & \hfill b\hfill \\ \hfill c\hfill & \hfill d\hfill \end{array}\right)\hfill & \hfill \left(\begin{array}{cc}\hfill e\hfill & \hfill f\hfill \\ \hfill g\hfill & \hfill h\hfill \end{array}\right)\hfill \\ \hfill 0\hfill & \hfill \left(\begin{array}{cc}\hfill i\hfill & \hfill j\hfill \\ \hfill k\hfill & \hfill l\hfill \end{array}\right)\hfill \end{array}\right)$',
# mathtex doesn't support array
# 'mmltt24' : u'$det|\\begin{array}{ccccc}\\hfill {c}_{0}\\hfill & \\hfill {c}_{1}\\hfill & \\hfill {c}_{2}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{n}\\hfill \\\\ \\hfill {c}_{1}\\hfill & \\hfill {c}_{2}\\hfill & \\hfill {c}_{3}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{n+1}\\hfill \\\\ \\hfill {c}_{2}\\hfill & \\hfill {c}_{3}\\hfill & \\hfill {c}_{4}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{n+2}\\hfill \\\\ \\hfill \\u22ee\\hfill & \\hfill \\u22ee\\hfill & \\hfill \\u22ee\\hfill & \\hfill \\hfill & \\hfill \\u22ee\\hfill \\\\ \\hfill {c}_{n}\\hfill & \\hfill {c}_{n+1}\\hfill & \\hfill {c}_{n+2}\\hfill & \\hfill \\dots \\hfill & \\hfill {c}_{2n}\\hfill \\end{array}|>0$',
r'${y}_{{x}_{2}}$',
r'${x}_{92}^{31415}+\pi $',
r'${x}_{{y}_{b}^{a}}^{{z}_{c}^{d}}$',
r'${y}_{3}^{\prime \prime \prime }$',
r"$\left( \xi \left( 1 - \xi \right) \right)$", # Bug 2969451
r"$\left(2 \, a=b\right)$", # Sage bug #8125
r"$? ! &$", # github issue #466
]
digits = "0123456789"
uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
lowercase = "abcdefghijklmnopqrstuvwxyz"
uppergreek = (r"\Gamma \Delta \Theta \Lambda \Xi \Pi \Sigma \Upsilon \Phi \Psi "
r"\Omega")
lowergreek = (r"\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota "
r"\lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon "
r"\phi \chi \psi")
all = [digits, uppercase, lowercase, uppergreek, lowergreek]
font_test_specs = [
([], all),
(['mathrm'], all),
(['mathbf'], all),
(['mathit'], all),
(['mathtt'], [digits, uppercase, lowercase]),
(['mathcircled'], [digits, uppercase, lowercase]),
(['mathrm', 'mathcircled'], [digits, uppercase, lowercase]),
(['mathbf', 'mathcircled'], [digits, uppercase, lowercase]),
(['mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathrm', 'mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathbf', 'mathbb'], [digits, uppercase, lowercase,
r'\Gamma \Pi \Sigma \gamma \pi']),
(['mathcal'], [uppercase]),
(['mathfrak'], [uppercase, lowercase]),
(['mathbf', 'mathfrak'], [uppercase, lowercase]),
(['mathscr'], [uppercase, lowercase]),
(['mathsf'], [digits, uppercase, lowercase]),
(['mathrm', 'mathsf'], [digits, uppercase, lowercase]),
(['mathbf', 'mathsf'], [digits, uppercase, lowercase])
]
font_tests = []
for fonts, chars in font_test_specs:
wrapper = [' '.join(fonts), ' $']
for font in fonts:
wrapper.append(r'\%s{' % font)
wrapper.append('%s')
for font in fonts:
wrapper.append('}')
wrapper.append('$')
wrapper = ''.join(wrapper)
for set in chars:
font_tests.append(wrapper % set)
def make_set(basename, fontset, tests, extensions=None):
def make_test(filename, test):
@image_comparison(baseline_images=[filename], extensions=extensions,
freetype_version=('2.4.5', '2.4.9'))
def single_test():
matplotlib.rcParams['mathtext.fontset'] = fontset
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.5, test, horizontalalignment='center', verticalalignment='center')
func = single_test
func.__name__ = filename + "_test"
return func
# We inject test functions into the global namespace, rather than
# using a generator, so that individual tests can be run more
# easily from the commandline and so each test will have its own
# result.
for i, test in enumerate(tests):
filename = '%s_%s_%02d' % (basename, fontset, i)
globals()['test_%s' % filename] = make_test(filename, test)
make_set('mathtext', 'cm', math_tests)
make_set('mathtext', 'stix', math_tests)
make_set('mathtext', 'stixsans', math_tests)
make_set('mathfont', 'cm', font_tests, ['png'])
make_set('mathfont', 'stix', font_tests, ['png'])
make_set('mathfont', 'stixsans', font_tests, ['png'])
def test_fontinfo():
import matplotlib.font_manager as font_manager
import matplotlib.ft2font as ft2font
fontpath = font_manager.findfont("Bitstream Vera Sans")
font = ft2font.FT2Font(fontpath)
table = font.get_sfnt_table("head")
assert table['version'] == (1, 0)
| gpl-3.0 |
stephenphillips42/stephenphillips42.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the talk.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
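# For reference, a generated file such as ``_talks/2014-03-01-talk-1.md`` would
# begin with YAML front matter roughly like the following (values illustrative):
#
#   ---
#   title: "Talk 1 on Relevant Topic in Your Field"
#   collection: talks
#   type: "Talk"
#   permalink: /talks/2014-03-01-talk-1
#   venue: "UC San Francisco, Department of Testing"
#   date: 2014-03-01
#   location: "San Francisco, California"
#   ---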
| mit |
akionakamura/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
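# Usage sketch (illustrative) of the estimator exercised by these tests:
#   >>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
#   >>> log_density = kde.score_samples(X[:5])   # log p(x) at the first 5 points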
| bsd-3-clause |
xcompass/pandas-gbq | setup.py | 1 | 1378 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import versioneer
NAME = 'pandas-gbq'
# versioning
cmdclass = versioneer.get_cmdclass()
def readme():
with open('README.rst') as f:
return f.read()
INSTALL_REQUIRES = (
['pandas', 'httplib2', 'google-api-python-client', 'oauth2client']
)
setup(
name=NAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Pandas interface to Google Big Query",
long_description=readme(),
license='BSD License',
author='The PyData Development Team',
author_email='pydata@googlegroups.com',
url='https://github.com/pydata/pandas-gbq',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
],
keywords='data',
install_requires=INSTALL_REQUIRES,
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
test_suite='tests',
)
| bsd-3-clause |
napratin/nap | nap/tests/test_neuron.py | 1 | 7299 | from time import time, sleep
from unittest import TestCase
import numpy as np
from nap.neuron import Neuron, threshold_potential, action_potential_peak, action_potential_trough, synaptic_strength, setup_neuron_plot
from matplotlib.pyplot import figure, plot, subplot, subplots_adjust, draw, pause, hold, show, xlim, ylim, title, xlabel, ylabel, axhline
import matplotlib as mpl
mpl.rc('axes', titlesize=22, labelsize=20)
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
holdPlots = False # wait for user to close plot windows?
class TestNeuron(TestCase):
"""Test neuron functions individually and in small groups."""
def setUp(self):
pass
def tearDown(self):
pass
def test_single(self, duration=10.0, delay=0.01):
startTime = time()
timeNow = 0.0
n = Neuron((0.0, 0.0, 0.0), timeNow)
# Set up plotting
figure(figsize = (12, 9))
hold(True) # [graph]
xlim(0.0, duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.02) # [graph]
setup_neuron_plot("Stimulated neuron", None, "Membrane potential (V)")
while timeNow <= duration:
timeNow = time() - startTime
n.accumulate(np.random.normal(0.001, 0.00025))
n.update(timeNow)
n.plot() # [graph]
pause(delay) # [graph]
#sleep(delay)
if holdPlots:
# Show plot (and wait till it's closed)
show() # [graph]
def test_pair(self, pre_duration=2.0, stimulus_duration=10.0, post_duration=2.0, delay=0.01):
total_duration = pre_duration + stimulus_duration + post_duration
stimulus_begin = pre_duration
stimulus_end = pre_duration + stimulus_duration
startTime = time()
timeNow = 0.0
n1 = Neuron((0.0, 0.0, 0.0), timeNow)
n2 = Neuron((0.0, 0.0, 1.0), timeNow)
n1.synapseWith(n2)
# Set up plotting
figure(figsize = (12, 9))
hold(True) # [graph]
subplot(211) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.02) # [graph]
setup_neuron_plot("Presynaptic neuron", None, "Membrane potential (V)")
subplot(212) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.02) # [graph]
setup_neuron_plot("Postsynaptic neuron", "Time (s)", "Membrane potential (V)")
subplots_adjust(hspace = 0.33)
while timeNow <= total_duration:
timeNow = time() - startTime
if stimulus_begin <= timeNow <= stimulus_end:
n1.accumulate(np.random.normal(0.0035, 0.0005)) # TODO accumulate value based on deltaTime (since last accumulate)
n1.update(timeNow)
#print n1.id, n1.timeCurrent, n1.potential # [log: potential]
n2.update(timeNow)
#print n2.id, n2.timeCurrent, n2.potential # [log: potential]
subplot(211) # [graph]
n1.plot() # [graph]
subplot(212) # [graph]
n2.plot() # [graph]
pause(delay) # [graph]
#sleep(delay)
if holdPlots:
show() # [graph]
def test_gatekeeper(self, total_duration=14.0, stimulus_period=(2.0, 12.0), gate_period=(5.0, 8.0), delay=0.01):
startTime = time()
timeNow = 0.0
n1 = Neuron((0.0, 0.0, 0.0), timeNow)
n2 = Neuron((0.0, 0.0, 1.0), timeNow)
g1 = Neuron((-1.0, -1.0, 1.0), timeNow) # gatekeeper
n1.synapseWith(n2, None, g1) # auto-initialize synaptic strength
# Set up plotting
figure(figsize = (12, 9))
hold(True) # [graph]
ax = subplot(311) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.01) # [graph]
setup_neuron_plot("Presynaptic neuron", None, None)
ax.get_xaxis().set_ticklabels([])
ax = subplot(312) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.01) # [graph]
setup_neuron_plot("Postsynaptic neuron", None, "Membrane potential (V)")
ax.get_xaxis().set_ticklabels([])
subplot(313) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.01) # [graph]
setup_neuron_plot("Gatekeeper neuron", "Time (s)", None)
subplots_adjust(hspace = 0.33)
while timeNow <= total_duration:
timeNow = time() - startTime
if stimulus_period[0] <= timeNow <= stimulus_period[1]:
n1.accumulate(np.random.normal(0.004, 0.0005)) # TODO accumulate value based on deltaTime (since last accumulate)
if gate_period[0] <= timeNow <= gate_period[1]:
g1.accumulate(np.random.normal(0.0035, 0.0005))
n1.update(timeNow)
#print n1.id, n1.timeCurrent, n1.potential # [log: potential]
n2.update(timeNow)
#print n2.id, n2.timeCurrent, n2.potential # [log: potential]
g1.update(timeNow)
subplot(311) # [graph]
n1.plot() # [graph]
subplot(312) # [graph]
n2.plot() # [graph]
subplot(313) # [graph]
g1.plot() # [graph]
pause(delay) # [graph]
#sleep(delay)
if holdPlots:
show() # [graph]
def test_inhibitor(self, total_duration=14.0, stimulus_period=(2.0, 12.0), inhibition_period=(5.0, 8.0), delay=0.01):
startTime = time()
timeNow = 0.0
n1 = Neuron((0.0, 0.0, 0.0), timeNow)
n2 = Neuron((0.0, 0.0, 1.0), timeNow)
i1 = Neuron((-1.0, -1.0, 1.0), timeNow) # inhibitor
n1.synapseWith(n2) # no synaptic gating
i1.synapseWith(n2, -np.random.normal(synaptic_strength.mu, synaptic_strength.sigma)) # inhibitory synapse
# Set up plotting
figure(figsize = (12, 9))
hold(True) # [graph]
subplot(311) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.01) # [graph]
setup_neuron_plot("Neuron " + str(n1.id))
subplot(312) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.01) # [graph]
setup_neuron_plot("Neuron " + str(n2.id))
subplot(313) # [graph]
xlim(0.0, total_duration) # [graph]
ylim(action_potential_trough.mu - 0.01, action_potential_peak + 0.01) # [graph]
setup_neuron_plot("Neuron " + str(i1.id) + " (inhibitor)")
subplots_adjust(hspace = 0.33)
while timeNow <= total_duration:
timeNow = time() - startTime
if stimulus_period[0] <= timeNow <= stimulus_period[1]:
n1.accumulate(np.random.normal(0.004, 0.0005)) # TODO accumulate value based on deltaTime (since last accumulate)
if inhibition_period[0] <= timeNow <= inhibition_period[1]:
i1.accumulate(np.random.normal(0.0035, 0.0005))
n1.update(timeNow)
#print n1.id, n1.timeCurrent, n1.potential # [log: potential]
n2.update(timeNow)
#print n2.id, n2.timeCurrent, n2.potential # [log: potential]
i1.update(timeNow)
subplot(311) # [graph]
n1.plot() # [graph]
subplot(312) # [graph]
n2.plot() # [graph]
subplot(313) # [graph]
i1.plot() # [graph]
pause(delay) # [graph]
#sleep(delay)
if holdPlots:
show() # [graph]
| mit |
cysjtu/SentimentAnalysis | test.py | 1 | 14738 | # encoding=utf-8
'''
Created on December 10, 2015
@author: nali
'''
import csv
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import BernoulliNB
from sklearn import cross_validation
from sklearn.metrics import classification_report
import numpy as np
from sklearn.metrics import accuracy_score
import jieba
import jieba.posseg as pseg
from xml.dom import minidom
import MySQLdb
import re
import random
conn=MySQLdb.connect(host="127.0.0.1",user="root",passwd="cy1993",port=3306,db="lexicon",charset='utf8')
cur=conn.cursor()
#query="insert into lex(word,polar) values(%s,%s)"
#str="看了这本书,总体感觉老外带孩子比中国人要粗些。其实,本来就不用太过细致了,人家那样照样能把孩子带好,不能把孩子放在保险箱里养。这书挺好的,可以看看"
#cuts=pseg.cut(str)
InverseWord_dict={}
stopWord_dict={}
sentiment_dict={}
level_dict={}
def build_sentiment_dict():
global sentiment_dict
query2="select * from sentiment"
cur.execute(query2)
data=cur.fetchall()
for s in data:
sentiment_dict[s[1]]=s[2]
print len(sentiment_dict.keys())
def getPolar(word):
global sentiment_dict
if sentiment_dict.has_key(word):
return sentiment_dict[word]
else:
return -1
"""
query2="select polar from sentiment where word = %s"
cur.execute(query2,(word))
polar=cur.fetchone()
if polar is not None:
#print polar
return polar[0]
else :
return -1
"""
def build_level_dict():
global level_dict
query2="select * from level"
cur.execute(query2)
data=cur.fetchall()
for s in data:
level_dict[s[1]]=(s[2],s[3])
print len(level_dict.keys())
def getLevel(word):
global level_dict
if level_dict.has_key(word):
return level_dict[word]
else:
return (-1,-1000.0)
"""
query2="select * from level where word = %s"
cur.execute(query2,(word))
res=cur.fetchone()
if res is not None:
#print float(res[3])
return (res[2],float(res[3]) )
else :
return (-1,-1000.0)
"""
def getSimilar(word):
query2 = "select * from similar where word = %s"
query3 = "select * from similar where class = %s"
cur.execute(query2, (word))
tmp = cur.fetchone()
data = []
if tmp is not None:
word_class = tmp[2]
#print word_class
cur.execute(query3, (word_class))
sim = cur.fetchall()
if sim is not None:
for s in sim:
data.append(s[1])
return data
def build_InverseWord_dict():
global InverseWord_dict
query2="select * from inverse"
cur.execute(query2)
data=cur.fetchall()
for s in data:
InverseWord_dict[s[0]]=1
print len(InverseWord_dict.keys())
# Check whether a word is a negation word
def isInverseWord(word):
global InverseWord_dict
if InverseWord_dict.has_key(word):
return 1
else:
return -1
"""
query2="select * from inverse where word = %s"
cur.execute(query2,(word))
res=cur.fetchone()
if res is not None:
#print polar
return 1
else :
return -1
"""
def build_stopWord_dict():
global stopWord_dict
query2="select * from stop"
cur.execute(query2)
data=cur.fetchall()
for s in data:
stopWord_dict[s[0]]=1
print len(stopWord_dict.keys())
def isstopWord(word):
global stopWord_dict
if stopWord_dict.has_key(word):
return 1
else:
return -1
"""
query2="select * from stop where word = %s"
cur.execute(query2,(word))
res=cur.fetchone()
if res is not None:
#print polar
return 1
else :
return -1
"""
##===========================================================
def insert_stop(word):
query="insert into stop(word) values(%s)"
try:
cur.execute(query,(word ) )
conn.commit()
except:
print "insert fail-->"+word
# Load the stop words
def load_stop():
file_name="dict/stopword.txt"
file=open(file_name,"r")
lines=file.readlines()
for l in lines:
insert_stop(l.strip())
def insert_similar(word,word_class):
query3="insert into similar(word,class) values(%s,%s)"
try:
cur.execute(query3,(word,word_class))
conn.commit()
except:
print "insert fail"
# Load the synonym dictionary
def load_similar():
file_name="dict/similarWord.txt"
file=open(file_name,"r")
lines=file.readlines()
for l in lines:
words=l.split(' ')
word_class=words[0]
for i in range(1,len(words)):
w=words[i]
print w
insert_similar(w.strip(),word_class.strip())
print "load_similar finish"
def insert_level(word,word_class,score):
query4="insert into level(word,class,score) values(%s,%s,%s)"
try:
cur.execute(query4,(word,word_class,score))
conn.commit()
except:
print "insert fail"
def load_level_sub(file_name,word_class,score):
file=open(file_name,"r")
lines=file.readlines()
for l in lines:
w=l.strip()
print w
print word_class
print score
insert_level(w,word_class,score)
print file_name+" load_level finish"
file.close()
# Load the degree adverbs
def load_level():
most="dict/most.txt"
most_score=6.5
load_level_sub(most,1,most_score)
very="dict/very.txt"
very_score=4.5
load_level_sub(very,2,very_score)
more="dict/more.txt"
more_score=2.5
load_level_sub(more,3,more_score)
ish="dict/ish.txt"
ish_score=1.5
load_level_sub(ish,4,ish_score)
insufficiently="dict/insufficiently.txt"
insufficiently_score=0.75
load_level_sub(insufficiently,5,insufficiently_score)
#over="dict/over.txt"
#over_score=0.05
#load_level_sub(over,6,over_score)
def insert_inverse(word):
query="insert into inverse(word) values(%s)"
try:
cur.execute(query,(word ) )
conn.commit()
except:
print "insert fail-->"+word
# Load the negation words
def load_inverse():
file_name="dict/inverse.txt"
file=open(file_name,"r")
lines=file.readlines()
for l in lines:
insert_inverse(l.strip())
def insert_sentiment(word,polar):
query="insert into sentiment(word,polar) values(%s,%s)"
try:
cur.execute(query,(word,polar))
conn.commit()
except:
print "insert fail->>"+word
# Polarity codes: 0 = neutral, 1 = positive, 2 = negative, 3 = both positive and negative.
def load_sentiment():
neg_file = "dict/negtive.txt"
pos_file = "dict/postive.txt"
fileneg = open(neg_file, "r")
lines_neg = fileneg.readlines()
for l in lines_neg:
w = l.strip()
if w != "" and w !=" ":
insert_sentiment(w, 2)
# ws=getSimilar(w)
# for wss in ws:
# insert_sentiment(wss,2)
########################
filepos = open(pos_file, "r")
lines_pos = filepos.readlines()
for l in lines_pos:
w = l.strip()
if w != "" and w !=" ":
insert_sentiment(w, 1)
# ws=getSimilar(w)
# for wss in ws:
# insert_sentiment(wss,1)
"""
print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
raw_input("sssssssss")
#======insert similar ==================
for l in lines_neg:
w=l.strip()
ws=getSimilar(w)
for wss in ws:
insert_sentiment(wss,2)
########################
for l in lines_pos:
w=l.strip()
ws=getSimilar(w)
for wss in ws:
insert_sentiment(wss,1)
"""
def judgePolar(text):
tokens=pseg.cut(text)
words=[]
flag=[]
    polars=["","positive","negative"]
    levels=["","most","very","more","little","insufficiently"]
sss=""
for w,pos in tokens:
#print w
words.append(w)
stop=isstopWord(w)
if 1==stop:
flag.append(("none",""))
continue
inv=isInverseWord(w)
level,lev_score=getLevel(w)
if 1 == inv:
flag.append(("inverse",""))
continue
#print ("%s----->[inverse]"%(w))
elif -1 != level:
flag.append(("level",levels[level]))
continue
else:
polar=getPolar(w)
if polar != -1:
flag.append(("sentiment",polars[polar]))
#print ("%s----->[sentiment]%s"%(w,polars[polar]))
else:
flag.append(("none",""))
#print ("%s----->[none]"%(w))
pos_score=0.0
neg_score=0.0
inv_cont=0
lev_cont=0
for i in range(len(flag)):
if flag[i][0] == "sentiment":
            # positive sentiment word
sss = sss + "-" + words[i] + "[" + flag[i][1] + "]"
tmp_score = 1.0
            # Look backwards for a degree adverb
for k in range(i - 1, -1, -1):
if flag[k][0] == "sentiment":
break
elif flag[k][0] == "level":
level,lev_score= getLevel(words[k])
tmp_score = tmp_score * lev_score
break
            # Count the negation words looking backwards
tmp_inv_cnt=0
for k in range(i - 1, -1, -1):
if flag[k][0] == "sentiment":
break
elif flag[k][0] == "inverse":
tmp_inv_cnt+=1
if flag[i][1]==polars[1]:
if tmp_inv_cnt%2 != 0 :
neg_score+=tmp_score
else:
pos_score+=tmp_score
else:
if tmp_inv_cnt%2 != 0 :
pos_score+=tmp_score
else:
neg_score+=tmp_score
elif flag[i][0] == "inverse":
sss = sss + "-" +words[i] + "[" + flag[i][0] + "]"
inv_cont = inv_cont + 1
elif flag[i][0] == "level":
lev_cont+=1
sss = sss + "-" + words[i] + "[" + flag[i][0] + "]"
else:
sss = sss + "-" + words[i]
#print ("pos_score=%f"%(pos_score))
#print ("neg_score=%f"%(neg_score))
#print sss
pola=0
#sum=pos_score+neg_score
if pos_score<=0.000000001 and neg_score<=0.00000001:
if inv_cont%2!=0:
pola=2
return (pola,0.0,0.5,sss)
elif inv_cont!=0:
pola=1
return (pola,0.5,0.0,sss)
else:
if lev_cont !=0:
return (1,0.2,0.0,sss)
else:
return (0,0.0,0.0,sss)
elif pos_score < neg_score:
pola=2
elif pos_score > neg_score:
pola=1
else:
if inv_cont%2!=0:
pola=2
return (pola,pos_score,neg_score,sss)
elif inv_cont!=0:
pola=1
return (pola,pos_score,neg_score,sss)
else:
if lev_cont !=0:
return (1,pos_score+0.25,neg_score,sss)
else:
return (0,pos_score,neg_score,sss)
#print ("polar=%s"%(polars[pola]))
return (pola,pos_score,neg_score,sss)
def process_text(text):
sp=',|,|。|\?|!|~|;|;|\n'
texts=re.split(sp, text)
ret=[]
for line in texts:
w=line.strip()
if w =="" :
pass
#print "null -->"+line
else:
t=judgePolar(w)
print ("%s===>%d %f %f "%(t[3],t[0],t[1],t[2]))
ret.append(t)
return ret
def calculate_score(data):
pos=0.0
neg=0.0
cnt=len(data)
pos_cnt=0
neg_cnt=0
for k in data:
if k[0] !=0:
pos+=k[1]
neg+=k[2]
if k[0]==1:
pos_cnt+=1
elif k[0]==2:
neg_cnt+=1
if pos>neg:
return 1
elif pos<neg:
return -1
else:
if pos_cnt>neg_cnt:
return 1
elif pos_cnt<neg_cnt:
return -1
else:
return random.randint(0,1)-1
def allJudge(data_train,target_train):
ret=[]
cnt=0
for i in range(len(data_train)):
print ("%d================================================="%cnt)
text=data_train[i]
label=target_train[i]
arr=process_text(text)
result=calculate_score(arr)
if result ==1:
if label == "negative" :
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>error---suppose negative but positive-------%d"%cnt)
ret.append("positive")
else:
if label == "positive" :
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>error---suppose positive but negative-------%d"%cnt)
ret.append("negative")
cnt+=1
return ret
"""
#@@@@@@@@@@@@@@@@@@
load_similar()
load_level()
load_inverse()
load_sentiment()
load_stop()
#@@@@@@@@@@@@@@@@@@@@
"""
print "build_sentiment_dict"
build_sentiment_dict()
print "build_level_dict"
build_level_dict()
print "build_InverseWord_dict"
build_InverseWord_dict()
print "build_stopWord_dict"
build_stopWord_dict()
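# Usage sketch (illustrative; assumes the lexicon tables above are populated and
# that `text` holds a Chinese review as a unicode string):
#   >>> clauses = process_text(text)          # per-clause polarity tuples
#   >>> calculate_score(clauses)              # 1 -> positive, -1 -> negative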
| apache-2.0 |
airanmehr/bio | Scripts/TimeSeriesPaper/Plot/MarkovBrownian.py | 1 | 5466 | '''
Copyleft Oct 03, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import itertools
import sys
import numpy as np;
sys.path.insert(1, '/home/arya/workspace/bio/')
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
import Utils.Plots as pplt
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os;
import scipy.stats as stats
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
from Utils import Simulation
import CLEAR.Libs.Markov as mkv
sns.set_style("whitegrid", {"grid.color": "0.9", 'axes.linewidth': .05, "grid.linewidth": ".09"})
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']});
mpl.rc('text', usetex=True)
def runOne(args):
path = utl.outpath + 'markov/simulations/'
utl.mkdir(path)
numExp = int(1e5)
nu0, s = args
print nu0, s
for i, batch in enumerate(utl.batch(range(numExp), 10000)):
print;
print i, batch[0], batch[-1]
a = pd.concat(map(lambda x: Simulation.simulateSingleLoci(nu0=nu0, s=s)[[1, 10, 100]], batch), axis=1).T
a.to_pickle(path + 'nu{:E}.s{:E}.{}.df'.format(nu0, s, i))
def createData():
runOne((0.1, 0))
# map(runOne, [(nu0, s) for nu0 in [0.005, 0.1] for s in [0, 0.1]])
def loadData(nu0=0.005, s=0):
return pd.concat(
map(lambda i: pd.read_pickle(utl.outpath + 'markov/simulations/' + 'nu{:E}.s{:E}.{}.df'.format(nu0, s, i)),
range(10)))
def getMarkov(nu0, s, tau):
T = mkv.Markov.computeTransition(s, 1000)
v = T.loc[nu0].copy(True)
for _ in range(tau - 1):
v = v.dot(T)
return v.rename('Markov Chain')
def getBrownian(x, nu0, tau, mu):
xsmooth = np.arange(np.min(x), np.max(x) + 1e-14, 0.00001)
N = 2000;
h = 2 * nu0 * (1 - nu0);
sig2 = h * tau / N;
brown = pd.Series(stats.norm(mu, sig2 * 100).pdf(xsmooth), index=xsmooth);
return (brown.size / float(len(x)) * brown / brown.sum()).rename('Brownian Motion')
def getObservation(nu0, s, tau, smoothWindow=1):
a = loadData(nu0, s)[str(tau)].value_counts().sort_index()
a /= a.sum()
return utl.smooth(a, smoothWindow)
# numExp = a.sum() * 1.0
# obs= a.apply(lambda x: pd.Series(np.random.binomial(numExp, x / numExp, 10))/numExp).stack().reset_index()
# obs.columns=[r'$\nu_0$','i','Observation']
# return obs
def createPlotData():
res = []
for s, nu0 in itertools.product([0, 0.1], [0.005, 0.1]):
for tau in [1, 10, 100]:
observation = getObservation(nu0, s, tau)
x = observation.index.values
markov = getMarkov(nu0, s, tau).loc[x]
brownian = getBrownian(x=x, nu0=nu0, tau=tau, mu=markov.idxmax())
res += [pd.Series([observation, markov, brownian], index=['observation', 'markov', 'brownian']).rename(
(nu0, s, tau))]
pd.concat(res, axis=1).to_pickle(utl.outpath + 'markov/simulations/plotData.df')
# color=sns.color_palette("cubehelix", 8)
color_map = plt.cm.Set1(np.linspace(0, 1, 9))
color = {None: color_map[0], 'Markov Chain': color_map[1], 'Brownian Motion': color_map[2]}
def plot():
fontsize = 5
def plotOne(x, ax):
lw = 3
alpha = 0.8
try:
if x.name is None:
alpha = 1
lw = 0.7
except:
pass
if x is not None:
x.plot(ax=ax, color=color[x.name], lw=lw, alpha=alpha)
fig, axes = plt.subplots(4, 3, figsize=(7, 3.9), dpi=300)
df = pd.read_pickle(utl.outpath + 'markov/simulations/plotData.df')
ABC = [list('ABC'), list('DEF'), list('GHI'), list('KLM')]
for (s, nu0), axr, titles in zip(itertools.product([0, 0.1], [0.005, 0.1]), axes, ABC):
for tau, ax, title in zip([1, 10, 100], axr, titles):
observation = getObservation(nu0, s, tau)
x = observation.index.values
brownian = getBrownian(x=x, nu0=nu0, tau=tau, mu=nu0)
markov = df[(nu0, s, tau)].loc['markov']
df[(nu0, s, tau)] = pd.Series([observation, markov, brownian],
index=['observation', 'markov', 'brownian']).rename((nu0, s, tau))
if s: df[(nu0, s, tau)].loc['brownian'] = None
df[(nu0, s, tau)].loc[['markov', 'brownian', 'observation']].apply(lambda x: plotOne(x, ax))
if nu0 == 0.005 and tau == 100: ax.set_xlim([0, 0.02])
ax.locator_params(nbins=1, axis='y')
if nu0 == 0.005 and tau == 100 and s == 0:
ax.legend(['Markov Chain', 'Brownian Motion', 'Empirical Distribution'], fontsize=fontsize)
ax.set_xticks(ax.get_xticks()[::2]);
# ax.set_xticklabels(map(str,tick))
pplt.annotate('(' + title + ')', fontsize=fontsize, ax=ax)
pplt.setSize(ax, fontsize)
axr[0].set_ylabel(r'$P(\nu_\tau|\nu_0={},s={}$)'.format(nu0, s), fontsize=fontsize + 2, rotation=0, labelpad=30)
# ax.text(0.0,0.0,)
for tau, ax in zip([1, 10, 100], axes[0]):
ax.set_title(r'$\tau={}$'.format(tau), fontsize=fontsize)
for ax in axr:
ax.set_xlabel(r'$\nu$', fontsize=fontsize)
plt.gcf().tight_layout(pad=0.1, rect=[0.05, 0, 1, 1])
pplt.savefig('markovDists', 300)
plt.show()
if __name__ == '__main__':
# createPlotData()
plot()
| mit |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/tests/test_lines.py | 5 | 5451 | """
Tests specific to the lines module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import itertools
import matplotlib.lines as mlines
import nose
from nose.tools import assert_true, assert_raises
from timeit import repeat
import numpy as np
from cycler import cycler
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup, image_comparison
@cleanup
def test_invisible_Line_rendering():
"""
Github issue #1256 identified a bug in Line.draw method
Despite visibility attribute set to False, the draw method was not
returning early enough and some pre-rendering code was executed
though not necessary.
Consequence was an excessive draw time for invisible Line instances
holding a large number of points (Npts> 10**6)
"""
# Creates big x and y data:
N = 10**7
x = np.linspace(0,1,N)
y = np.random.normal(size=N)
# Create a plot figure:
fig = plt.figure()
ax = plt.subplot(111)
# Create a "big" Line instance:
l = mlines.Line2D(x,y)
l.set_visible(False)
# but don't add it to the Axis instance `ax`
# [here Interactive panning and zooming is pretty responsive]
# Time the canvas drawing:
t_no_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
# (gives about 25 ms)
# Add the big invisible Line:
ax.add_line(l)
# [Now interactive panning and zooming is very slow]
# Time the canvas drawing:
t_unvisible_line = min(repeat(fig.canvas.draw, number=1, repeat=3))
# gives about 290 ms for N = 10**7 pts
slowdown_factor = (t_unvisible_line/t_no_line)
slowdown_threshold = 2 # trying to avoid false positive failures
assert_true(slowdown_factor < slowdown_threshold)
@cleanup
def test_set_line_coll_dash():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
# Testing setting linestyles for line collections.
# This should not produce an error.
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
assert True
@image_comparison(baseline_images=['line_dashes'], remove_text=True)
def test_line_dashes():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), linestyle=(0, (3, 3)), lw=5)
@cleanup
def test_line_colors():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(range(10), color='none')
ax.plot(range(10), color='r')
ax.plot(range(10), color='.3')
ax.plot(range(10), color=(1, 0, 0, 1))
ax.plot(range(10), color=(1, 0, 0))
fig.canvas.draw()
assert True
@cleanup
def test_linestyle_variants():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for ls in ["-", "solid", "--", "dashed",
"-.", "dashdot", ":", "dotted"]:
ax.plot(range(10), linestyle=ls)
fig.canvas.draw()
assert True
@cleanup
def test_valid_linestyles():
line = mlines.Line2D([], [])
with assert_raises(ValueError):
line.set_linestyle('aardvark')
@cleanup
def test_drawstyle_variants():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
for ds in ("default", "steps-mid", "steps-pre", "steps-post",
"steps", None):
ax.plot(range(10), drawstyle=ds)
fig.canvas.draw()
assert True
@cleanup
def test_valid_drawstyles():
line = mlines.Line2D([], [])
with assert_raises(ValueError):
line.set_drawstyle('foobar')
@image_comparison(baseline_images=['line_collection_dashes'], remove_text=True)
def test_set_line_coll_dash_image():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
cs = ax.contour(np.random.randn(20, 30), linestyles=[(0, (3, 3))])
@image_comparison(baseline_images=['marker_fill_styles'], remove_text=True,
extensions=['png'])
def test_marker_fill_styles():
colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
altcolor = 'lightgreen'
y = np.array([1, 1])
x = np.array([0, 9])
fig, ax = plt.subplots()
for j, marker in enumerate(mlines.Line2D.filled_markers):
for i, fs in enumerate(mlines.Line2D.fillStyles):
color = next(colors)
ax.plot(j * 10 + x, y + i + .5 * (j % 2),
marker=marker,
markersize=20,
markerfacecoloralt=altcolor,
fillstyle=fs,
label=fs,
linewidth=5,
color=color,
markeredgecolor=color,
markeredgewidth=2)
ax.set_ylim([0, 7.5])
ax.set_xlim([-5, 155])
@image_comparison(baseline_images=['scaled_lines'], style='default')
def test_lw_scaling():
th = np.linspace(0, 32)
fig, ax = plt.subplots()
lins_styles = ['dashed', 'dotted', 'dashdot']
cy = cycler(matplotlib.rcParams['axes.prop_cycle'])
for j, (ls, sty) in enumerate(zip(lins_styles, cy)):
for lw in np.linspace(.5, 10, 10):
ax.plot(th, j*np.ones(50) + .1 * lw, linestyle=ls, lw=lw, **sty)
def test_nan_is_sorted():
line = mlines.Line2D([], [])
assert_true(line._is_sorted(np.array([1, 2, 3])))
assert_true(line._is_sorted(np.array([1, np.nan, 3])))
assert_true(not line._is_sorted([3, 5] + [np.nan] * 100 + [0, 2]))
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
sanginnwoo/longtailedtit | doc/tests/rogers_stallybrass_clements.py | 11 | 2336 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected_rsc01():
# these data were generated by Mathematica
data = [
[0.996808823, 0],
[0.989844967, 0.141863271],
[0.976194821, 0.284448102],
[0.953409148, 0.428342817],
[0.920049652, 0.574319928],
[0.87624791, 0.723271831],
[0.823630904, 0.876122478],
[0.764738453, 1.033745067],
[0.702321448, 1.196905344],
[0.638830444, 1.366234168],
[0.57618155, 1.542222489],
[0.515734438, 1.725228947],
[0.458380406, 1.915491935],
[0.404663814, 2.11314096],
[0.35489631, 2.318204555],
[0.309248761, 2.530613669],
[0.267818662, 2.750200428],
[0.230675114, 2.976692948],
[0.197883279, 3.20970778],
[0.169508198, 3.448742886],
[0.145596157, 3.693175817],
[0.126132849, 3.942273462],
[0.11098406, 4.195219984],
[0.099837318, 4.451166256],
[0.092175519, 4.709296094],
[0.087311826, 4.968894341],
[0.084488131, 5.22939629],
[0.08299976, 5.490403553],
[0.082290809, 5.751666319],
[0.081986378, 6.013045608],
[0.081868622, 6.274472787]]
return zip(*data)
def rsc01():
f = open("../../tests/rogers_stallybrass_clements/gold/rsc01_swater_0191.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
def rsc02():
f = open("../../tests/rogers_stallybrass_clements/gold/rsc02_swater_0016.csv")
data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()]
s = [d[0] for d in data]
x = [d[2] for d in data]
return (s, x)
plt.figure()
plt.plot(expected_rsc01()[0], expected_rsc01()[1], 'k-', linewidth = 2.0, label = 'Analytic (RSC)')
plt.plot(rsc01()[0], rsc01()[1], 'rs', markersize = 6.0, label = 'MOOSE (high-res)')
plt.plot(rsc02()[0], rsc02()[1], 'g^', markersize = 5.0, label = 'MOOSE (low-res)')
plt.legend(loc = 'lower right')
plt.xlabel("Saturation")
plt.ylabel("Depth (m)")
plt.title("Water saturation at t=5s for infiltration into two-phase system")
plt.gca().invert_yaxis()
plt.savefig("rsc.pdf")
sys.exit(0)
| lgpl-2.1 |
faneshion/MatchZoo | matchzoo/preprocessors/diin_preprocessor.py | 1 | 6300 | """DIIN Preprocessor."""
from tqdm import tqdm
import pandas as pd
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo import DataPack
from .build_vocab_unit import build_vocab_unit
from .chain_transform import chain_transform
from . import units
tqdm.pandas()
class DIINPreprocessor(BasePreprocessor):
"""DIIN Model preprocessor."""
def __init__(self,
fixed_length_left: int = 10,
fixed_length_right: int = 10,
fixed_length_word: int = 5):
"""
DIIN Model preprocessor.
:param fixed_length_left: Integer, maximize length of :attr:'left' in
the data_pack.
:param fixed_length_right: Integer, maximize length of :attr:'right' in
the data_pack.
:param fixed_length_word: Integer, maximize length of each word.
Example:
>>> import matchzoo as mz
>>> train_data = mz.datasets.toy.load_data()
>>> test_data = mz.datasets.toy.load_data(stage='test')
>>> diin_preprocessor = mz.preprocessors.DIINPreprocessor(
... fixed_length_left=5,
... fixed_length_right=5,
... fixed_length_word=3,
... )
>>> diin_preprocessor = diin_preprocessor.fit(
... train_data, verbose=0)
>>> diin_preprocessor.context['input_shapes']
[(5,), (5,), (5, 3), (5, 3), (5,), (5,)]
>>> diin_preprocessor.context['vocab_size']
893
>>> train_data_processed = diin_preprocessor.transform(
... train_data, verbose=0)
>>> type(train_data_processed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
>>> test_data_processed = diin_preprocessor.transform(
... test_data, verbose=0)
>>> type(test_data_processed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
"""
super().__init__()
self._fixed_length_left = fixed_length_left
self._fixed_length_right = fixed_length_right
self._fixed_length_word = fixed_length_word
self._left_fixedlength_unit = units.FixedLength(
self._fixed_length_left,
pad_value='0',
pad_mode='post'
)
self._right_fixedlength_unit = units.FixedLength(
self._fixed_length_right,
pad_value='0',
pad_mode='post'
)
self._units = self._default_units()
def fit(self, data_pack: DataPack, verbose: int = 1):
"""
Fit pre-processing context for transformation.
:param data_pack: data_pack to be preprocessed.
:param verbose: Verbosity.
:return: class:'DIINPreprocessor' instance.
"""
func = chain_transform(self._units)
data_pack = data_pack.apply_on_text(func, mode='both', verbose=verbose)
vocab_unit = build_vocab_unit(data_pack, verbose=verbose)
vocab_size = len(vocab_unit.state['term_index'])
self._context['vocab_unit'] = vocab_unit
self._context['vocab_size'] = vocab_size
self._context['embedding_input_dim'] = vocab_size
data_pack = data_pack.apply_on_text(
units.NgramLetter(ngram=1, reduce_dim=True).transform,
mode='both', verbose=verbose)
char_unit = build_vocab_unit(data_pack, verbose=verbose)
self._context['char_unit'] = char_unit
self._context['input_shapes'] = [
(self._fixed_length_left,),
(self._fixed_length_right,),
(self._fixed_length_left, self._fixed_length_word,),
(self._fixed_length_right, self._fixed_length_word,),
(self._fixed_length_left,),
(self._fixed_length_right,)
]
return self
def transform(self, data_pack: DataPack, verbose: int = 1) -> DataPack:
"""
Apply transformation on data.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:'DataPack' object.
"""
data_pack = data_pack.copy()
data_pack.apply_on_text(
chain_transform(self._units),
mode='both', inplace=True, verbose=verbose)
# Process character representation
data_pack.apply_on_text(
units.NgramLetter(ngram=1, reduce_dim=False).transform,
rename=('char_left', 'char_right'),
mode='both', inplace=True, verbose=verbose)
char_index_dict = self._context['char_unit'].state['term_index']
left_charindex_unit = units.CharacterIndex(
char_index_dict, self._fixed_length_left, self._fixed_length_word)
right_charindex_unit = units.CharacterIndex(
char_index_dict, self._fixed_length_right, self._fixed_length_word)
data_pack.left['char_left'] = data_pack.left['char_left'].apply(
left_charindex_unit.transform)
data_pack.right['char_right'] = data_pack.right['char_right'].apply(
right_charindex_unit.transform)
# Process word representation
data_pack.apply_on_text(
self._context['vocab_unit'].transform,
mode='both', inplace=True, verbose=verbose)
# Process exact match representation
frame = data_pack.relation.join(
data_pack.left, on='id_left', how='left'
).join(data_pack.right, on='id_right', how='left')
left_exactmatch_unit = units.WordExactMatch(
self._fixed_length_left, match='text_left', to_match='text_right')
right_exactmatch_unit = units.WordExactMatch(
self._fixed_length_right, match='text_right', to_match='text_left')
data_pack.relation['match_left'] = frame.apply(
left_exactmatch_unit.transform, axis=1)
data_pack.relation['match_right'] = frame.apply(
right_exactmatch_unit.transform, axis=1)
data_pack.apply_on_text(
self._left_fixedlength_unit.transform,
mode='left', inplace=True, verbose=verbose)
data_pack.apply_on_text(
self._right_fixedlength_unit.transform,
mode='right', inplace=True, verbose=verbose)
return data_pack
| apache-2.0 |
jm-begon/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
    n_trials : int, nb_trials > 0 or 2-tuple
        Number of different initial sets of observations from which to
        run the algorithm.
        Instead of giving a number of trials to perform, one can provide a
        list of initial estimates that will be used to iteratively run
        c_step procedures. In this case:
        - n_trials[0]: array-like, shape (n_trials, n_features)
          is the list of `n_trials` initial location estimates
        - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
          is the list of `n_trials` initial covariance estimates
    select : int, int > 0
        Number of best candidates results to return.
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).
    best_ds : array-like, shape (select, n_samples)
        The Mahalanobis distances associated with the `select` best supports.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
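# Sketch of a direct call to select_candidates on toy data (illustrative; the
# shapes, trial counts and variable names are assumptions):
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(200, 3)
#     best_locs, best_covs, best_supports, best_ds = select_candidates(
#         X_demo, n_support=120, n_trials=30, select=5, n_iter=2,
#         random_state=rng)
#     # best_locs.shape == (5, 3) and best_covs.shape == (5, 3, 3)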
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Mahalanobis distances of all observations, computed with the raw
        robust location and precision estimates.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): reduce the number of candidates kept
            # before allocating the covariance array.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
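# Sketch of a direct call to fast_mcd (illustrative data; only the raw,
# uncorrected estimates are returned, as stated in the Notes above):
#
#     rng = np.random.RandomState(42)
#     X_demo = np.r_[rng.randn(95, 2), 10 + rng.randn(5, 2)]  # 5 shifted rows
#     raw_loc, raw_cov, raw_support, raw_dist = fast_mcd(X_demo,
#                                                        random_state=rng)
#     # the shifted rows should mostly fall outside `raw_support`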
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is close to, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
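# Sketch of the usual estimator-level usage (illustrative data; fit() applies
# the correction and re-weighting steps shown above internally):
#
#     rng = np.random.RandomState(0)
#     X_demo = np.r_[rng.randn(95, 2), 10 + rng.randn(5, 2)]
#     mcd = MinCovDet(random_state=0).fit(X_demo)
#     mcd.location_      # re-weighted robust location, shape (2,)
#     mcd.covariance_    # re-weighted robust covariance, shape (2, 2)
#     mcd.support_       # boolean mask of the observations kept as inliers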
| bsd-3-clause |
bchareyre/ratchet | examples/simple-scene/simple-scene-plot.py | 8 | 2026 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=.2,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
### You want snapshot to be taken every 1 sec (realTimeLim) or every 50 iterations (iterLim),
	### whichever comes sooner. virtTimeLim attribute is unset, hence virtual time period is not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
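## For instance (illustrative, following the same syntax as above), a single
## extra figure of z_sph against t alone would be declared as:
##   plot.plots={'t':('z_sph',)}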
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(2./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
musically-ut/statsmodels | statsmodels/examples/ex_generic_mle_t.py | 29 | 10826 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import stats, special
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
class MyT(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same
statistical model as discretemod.Poisson.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
The log likelihood of the model evaluated at `params`
Notes
--------
.. math :: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
#print len(params),
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
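# Cross-check sketch (not part of the original example): for the model instance
# `modp` built below and a parameter vector `params`, the hand-coded density
# above should agree with scipy.stats.t up to floating point error:
#
#     loc = np.dot(modp.exog, params[:-2])
#     np.allclose(-modp.nloglikeobs(params),
#                 stats.t.logpdf(modp.endog, params[-2],
#                                loc=loc, scale=params[-1]))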
#Example:
np.random.seed(98765678)
nobs = 1000
rvs = np.random.randn(nobs,5)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(5, size=nobs)
#print data_endog
modp = MyT(data_endog, data_exog)
modp.start_value = np.ones(data_exog.shape[1]+2)
modp.start_value[-2] = 10
modp.start_params = modp.start_value
resp = modp.fit(start_params = modp.start_value)
print(resp.params)
print(resp.bse)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_value, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_value)
print(tmp.shape)
'''
>>> tmp = modp.loglike(modp.start_value)
8
>>> tmp.shape
(100,)
>>> tmp.sum(0)
-24220.877108016182
>>> tmp = modp.nloglikeobs(modp.start_value)
8
>>> tmp.shape
(100, 100)
>>> np.dot(modp.exog, beta).shape
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'beta' is not defined
>>> params = modp.start_value
>>> beta = params[:-2]
>>> beta.shape
(6,)
>>> np.dot(modp.exog, beta).shape
(100,)
>>> modp.endog.shape
(100, 100)
>>> xbeta.shape
(100,)
>>>
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
repr(start_params) array([ 1., 1., 1., 1., 1., 1., 1., 1.])
Optimization terminated successfully.
Current function value: 91.897859
Iterations: 108
Function evaluations: 173
Gradient evaluations: 173
[ 1.58253308e-01 1.73188603e-01 1.77357447e-01 2.06707494e-02
-1.31174789e-01 8.79915580e-01 6.47663840e+03 6.73457641e+02]
[ NaN NaN NaN NaN NaN
28.26906182 NaN NaN]
()
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>> resp.bse
array([ NaN, NaN, NaN, NaN,
NaN, 28.26906182, NaN, NaN])
>>> resp.jac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'GenericLikelihoodModelResults' object has no attribute 'jac'
>>> resp.bsejac
array([ 45243.35919908, 51997.80776897, 41418.33021984,
42763.46575168, 50101.91631612, 42804.92083525,
3005625.35649203, 13826948.68708931])
>>> resp.bsejhj
array([ 1.51643931, 0.80229636, 0.27720185, 0.4711138 , 0.9028682 ,
0.31673747, 0.00524426, 0.69729368])
>>> resp.covjac
array([[ 2.04696155e+09, 1.46643494e+08, 7.59932781e+06,
-2.39993397e+08, 5.62644255e+08, 2.34300598e+08,
-3.07824799e+09, -1.93425470e+10],
[ 1.46643494e+08, 2.70377201e+09, 1.06005712e+08,
3.76824011e+08, -1.21778986e+08, 5.38612723e+08,
-2.12575784e+10, -1.69503271e+11],
[ 7.59932781e+06, 1.06005712e+08, 1.71547808e+09,
-5.94451158e+07, -1.44586401e+08, -5.41830441e+06,
1.25899515e+10, 1.06372065e+11],
[ -2.39993397e+08, 3.76824011e+08, -5.94451158e+07,
1.82871400e+09, -5.66930891e+08, 3.75061111e+08,
-6.84681772e+09, -7.29993789e+10],
[ 5.62644255e+08, -1.21778986e+08, -1.44586401e+08,
-5.66930891e+08, 2.51020202e+09, -4.67886982e+08,
1.78890380e+10, 1.75428694e+11],
[ 2.34300598e+08, 5.38612723e+08, -5.41830441e+06,
3.75061111e+08, -4.67886982e+08, 1.83226125e+09,
-1.27484996e+10, -1.12550321e+11],
[ -3.07824799e+09, -2.12575784e+10, 1.25899515e+10,
-6.84681772e+09, 1.78890380e+10, -1.27484996e+10,
9.03378378e+12, 2.15188047e+13],
[ -1.93425470e+10, -1.69503271e+11, 1.06372065e+11,
-7.29993789e+10, 1.75428694e+11, -1.12550321e+11,
2.15188047e+13, 1.91184510e+14]])
>>> hb
array([[ 33.68732564, -2.33209221, -13.51255321, -1.60840159,
-13.03920385, -9.3506543 , 4.86239173, -9.30409101],
[ -2.33209221, 3.12512611, -6.08530968, -6.79232244,
3.66804898, 1.26497071, 5.10113409, -2.53482995],
[ -13.51255321, -6.08530968, 31.14883498, -5.01514705,
-10.48819911, -2.62533035, 3.82241581, -12.51046342],
[ -1.60840159, -6.79232244, -5.01514705, 28.40141917,
-8.72489636, -8.82449456, 5.47584023, -18.20500017],
[ -13.03920385, 3.66804898, -10.48819911, -8.72489636,
9.03650914, 3.65206176, 6.55926726, -1.8233635 ],
[ -9.3506543 , 1.26497071, -2.62533035, -8.82449456,
3.65206176, 21.41825348, -1.28610793, 4.28101146],
[ 4.86239173, 5.10113409, 3.82241581, 5.47584023,
6.55926726, -1.28610793, 46.52354448, -32.23861427],
[ -9.30409101, -2.53482995, -12.51046342, -18.20500017,
-1.8233635 , 4.28101146, -32.23861427, 178.61978279]])
>>> np.linalg.eigh(hb)
(array([ -10.50373649, 0.7460258 , 14.73131793, 29.72453087,
36.24103832, 41.98042979, 48.99815223, 190.04303734]), array([[-0.40303259, 0.10181305, 0.18164206, 0.48201456, 0.03916688,
0.00903695, 0.74620692, 0.05853619],
[-0.3201713 , -0.88444855, -0.19867642, 0.02828812, 0.16733946,
-0.21440765, -0.02927317, 0.01176904],
[-0.41847094, 0.00170161, 0.04973298, 0.43276118, -0.55894304,
0.26454728, -0.49745582, 0.07251685],
[-0.3508729 , -0.08302723, 0.25004884, -0.73495077, -0.38936448,
0.20677082, 0.24464779, 0.11448238],
[-0.62065653, 0.44662675, -0.37388565, -0.19453047, 0.29084735,
-0.34151809, -0.19088978, 0.00342713],
[-0.15119802, -0.01099165, 0.84377273, 0.00554863, 0.37332324,
-0.17917015, -0.30371283, -0.03635211],
[ 0.15813581, 0.0293601 , 0.09882271, 0.03515962, -0.48768565,
-0.81960996, 0.05248464, 0.22533642],
[-0.06118044, -0.00549223, 0.03205047, -0.01782649, -0.21128588,
-0.14391393, 0.05973658, -0.96226835]]))
>>> np.linalg.eigh(np.linalg.inv(hb))
(array([-0.09520422, 0.00526197, 0.02040893, 0.02382062, 0.02759303,
0.03364225, 0.06788259, 1.34043621]), array([[-0.40303259, 0.05853619, 0.74620692, -0.00903695, -0.03916688,
0.48201456, 0.18164206, 0.10181305],
[-0.3201713 , 0.01176904, -0.02927317, 0.21440765, -0.16733946,
0.02828812, -0.19867642, -0.88444855],
[-0.41847094, 0.07251685, -0.49745582, -0.26454728, 0.55894304,
0.43276118, 0.04973298, 0.00170161],
[-0.3508729 , 0.11448238, 0.24464779, -0.20677082, 0.38936448,
-0.73495077, 0.25004884, -0.08302723],
[-0.62065653, 0.00342713, -0.19088978, 0.34151809, -0.29084735,
-0.19453047, -0.37388565, 0.44662675],
[-0.15119802, -0.03635211, -0.30371283, 0.17917015, -0.37332324,
0.00554863, 0.84377273, -0.01099165],
[ 0.15813581, 0.22533642, 0.05248464, 0.81960996, 0.48768565,
0.03515962, 0.09882271, 0.0293601 ],
[-0.06118044, -0.96226835, 0.05973658, 0.14391393, 0.21128588,
-0.01782649, 0.03205047, -0.00549223]]))
>>> np.diag(np.linalg.inv(hb))
array([ 0.01991288, 1.0433882 , 0.00516616, 0.02642799, 0.24732871,
0.05281555, 0.02236704, 0.00643486])
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 0.14111302, 1.02146375, 0.07187597, 0.16256686, 0.49732154,
0.22981633, 0.14955616, 0.08021756])
>>> hess = modp.hessian(resp.params)
>>> np.sqrt(np.diag(np.linalg.inv(hess)))
array([ 231.3823423 , 117.79508218, 31.46595143, 53.44753106,
132.4855704 , NaN, 5.47881705, 90.75332693])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-4)
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 31.93524822, 22.0333515 , NaN, 29.90198792,
38.82615785, NaN, NaN, NaN])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-8)
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>>
'''
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/groupby/test_filters.py | 15 | 24350 | # -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import pytest
from pandas import Timestamp
from pandas.core.index import MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_frame_equal, assert_series_equal
)
from pandas.compat import (lmap)
from pandas import compat
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
class TestGroupByFilter(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
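        # Sketch (illustrative, not asserted here): filter() with the default
        # dropna=True is equivalent to boolean indexing with a broadcast
        # per-group mask, e.g.
        #     s[grouped.transform(lambda x: x.mean() < 10).astype(bool)]
        # which is the "old way" exercised in test_filter_against_workaround.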
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
pytest.raises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
pytest.raises(TypeError, lambda: g_df.filter(f))
pytest.raises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
# previously didn't have access to col A #????
filt = g.filter(lambda x: x['A'].sum() == 2)
assert_frame_equal(filt, df.iloc[[0, 1]])
def test_filter_enforces_scalarness(self):
df = pd.DataFrame([
['best', 'a', 'x'],
['worst', 'b', 'y'],
['best', 'c', 'x'],
['best', 'd', 'y'],
['worst', 'd', 'y'],
['worst', 'd', 'y'],
['best', 'd', 'z'],
], columns=['a', 'b', 'c'])
with tm.assert_raises_regex(TypeError,
'filter function returned a.*'):
df.groupby('c').filter(lambda g: g['a'] == 'best')
def test_filter_non_bool_raises(self):
df = pd.DataFrame([
['best', 'a', 1],
['worst', 'b', 1],
['best', 'c', 1],
['best', 'd', 1],
['worst', 'd', 1],
['worst', 'd', 1],
['best', 'd', 1],
], columns=['a', 'b', 'c'])
with tm.assert_raises_regex(TypeError,
'filter function returned a.*'):
df.groupby('a').filter(lambda g: g.c.mean())
def test_filter_dropna_with_empty_groups(self):
# GH 10780
data = pd.Series(np.random.rand(9), index=np.repeat([1, 2, 3], 3))
        grouped = data.groupby(level=0)
        result_false = grouped.filter(lambda x: x.mean() > 1, dropna=False)
        expected_false = pd.Series([np.nan] * 9,
                                   index=np.repeat([1, 2, 3], 3))
        tm.assert_series_equal(result_false, expected_false)
        result_true = grouped.filter(lambda x: x.mean() > 1, dropna=True)
expected_true = pd.Series(index=pd.Index([], dtype=int))
tm.assert_series_equal(result_true, expected_true)
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def _check_groupby(df, result, keys, field, f=lambda x: x.sum()):
tups = lmap(tuple, df[keys].values)
tups = com._asarray_tuplesafe(tups)
expected = f(df.groupby(tups)[field])
for k, v in compat.iteritems(expected):
assert (result[k] == v)
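# Sketch of how the helper above might be exercised (illustrative frame; this
# module only defines it for reuse by other groupby tests):
#
#     df = DataFrame({'A': list('aabb'), 'B': list('xyxy'), 'C': range(4)})
#     result = df.groupby(['A', 'B'])['C'].sum()
#     _check_groupby(df, result, ['A', 'B'], 'C')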
| agpl-3.0 |
RRCKI/panda-server | pandaserver/test/datasetCallbackListener.py | 2 | 7517 | import os
import re
import sys
import ssl
import time
import signal
import socket
import commands
import optparse
import datetime
import cPickle as pickle
import stomp
from dq2.common import log as logging
from config import panda_config
from brokerage.SiteMapper import SiteMapper
from dataservice import DataServiceUtils
from dataservice.DDMHandler import DDMHandler
import yaml
import logging
logging.basicConfig(level = logging.DEBUG)
# logger
from pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('datasetCallbackListener')
# keep PID
pidFile = '%s/dataset_callback_listener.pid' % panda_config.logdir
# overall timeout value
overallTimeout = 60 * 59
# expiration time
expirationTime = datetime.datetime.utcnow() + datetime.timedelta(minutes=overallTimeout)
# kill whole process
def catch_sig(sig, frame):
try:
os.remove(pidFile)
except:
pass
# kill
_logger.debug('terminating ...')
commands.getoutput('kill -9 -- -%s' % os.getpgrp())
# exit
sys.exit(0)
# callback listener
class DatasetCallbackListener(stomp.ConnectionListener):
def __init__(self,conn,tb,sm,subscription_id):
# connection
self.conn = conn
# task buffer
self.taskBuffer = tb
# site mapper
self.siteMapper = sm
# subscription ID
self.subscription_id = subscription_id
def on_error(self,headers,body):
_logger.error("on_error : %s" % headers['message'])
def on_disconnected(self,headers,body):
_logger.error("on_disconnected : %s" % headers['message'])
def on_message(self, headers, message):
try:
dsn = 'UNKNOWN'
# send ack
id = headers['message-id']
#self.conn.ack(id,self.subscription_id)
# convert message form str to dict
messageDict = yaml.load(message)
# check event type
if not messageDict['event_type'] in ['datasetlock_ok']:
_logger.debug('%s skip' % messageDict['event_type'])
return
_logger.debug('%s start' % messageDict['event_type'])
messageObj = messageDict['payload']
# only for _dis or _sub
dsn = messageObj['name']
if (re.search('_dis\d+$',dsn) == None) and (re.search('_sub\d+$',dsn) == None):
_logger.debug('%s is not _dis or _sub dataset, skip' % dsn)
return
# take action
scope = messageObj['scope']
site = messageObj['rse']
_logger.debug('%s site=%s type=%s' % (dsn, site, messageDict['event_type']))
thr = DDMHandler(self.taskBuffer,None,site,dsn,scope)
thr.start()
thr.join()
_logger.debug('done %s' % dsn)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("on_message : %s %s" % (errtype,errvalue))
# main
def main(backGround=False):
_logger.debug('starting ...')
# register signal handler
signal.signal(signal.SIGINT, catch_sig)
signal.signal(signal.SIGHUP, catch_sig)
signal.signal(signal.SIGTERM,catch_sig)
signal.signal(signal.SIGALRM,catch_sig)
signal.alarm(overallTimeout)
# forking
pid = os.fork()
if pid != 0:
# watch child process
os.wait()
time.sleep(1)
else:
# main loop
from taskbuffer.TaskBuffer import taskBuffer
# check certificate
certName = '%s/pandasv1_usercert.pem' %panda_config.certdir
keyName = '%s/pandasv1_userkey.pem' %panda_config.certdir
#certName = '/etc/grid-security/hostcert.pem'
_logger.debug('checking certificate {0}'.format(certName))
certOK,certMsg = DataServiceUtils.checkCertificate(certName)
if not certOK:
_logger.error('bad certificate : {0}'.format(certMsg))
# initialize cx_Oracle using dummy connection
from taskbuffer.Initializer import initializer
initializer.init()
# instantiate TB
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# ActiveMQ params
queue = '/queue/Consumer.panda.rucio.events'
ssl_opts = {'use_ssl' : True,
'ssl_version' : ssl.PROTOCOL_TLSv1,
'ssl_cert_file' : certName,
'ssl_key_file' : keyName}
# resolve multiple brokers
brokerList = socket.gethostbyname_ex('atlas-mb.cern.ch')[-1]
# set listener
connList = []
for tmpBroker in brokerList:
try:
clientid = 'PANDA-' + socket.getfqdn() + '-' + tmpBroker
subscription_id = 'panda-server-consumer'
_logger.debug('setting listener %s to broker %s' % (clientid, tmpBroker))
conn = stomp.Connection(host_and_ports = [(tmpBroker, 61023)], **ssl_opts)
connList.append(conn)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("failed to connect to %s : %s %s" % (tmpBroker,errtype,errvalue))
catch_sig(None,None)
while True:
for conn in connList:
try:
if not conn.is_connected():
conn.set_listener('DatasetCallbackListener', DatasetCallbackListener(conn,taskBuffer,siteMapper,
subscription_id))
conn.start()
conn.connect(headers = {'client-id': clientid})
conn.subscribe(destination=queue, id=subscription_id, ack='auto')
_logger.debug('listener %s is up and running' % clientid)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("failed to set listener on %s : %s %s" % (tmpBroker,errtype,errvalue))
catch_sig(None,None)
time.sleep(5)
# entry
if __name__ == "__main__":
optP = optparse.OptionParser(conflict_handler="resolve")
options,args = optP.parse_args()
try:
# time limit
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(seconds=overallTimeout-180)
# get process list
scriptName = sys.argv[0]
out = commands.getoutput('env TZ=UTC ps axo user,pid,lstart,args | grep %s' % scriptName)
for line in out.split('\n'):
items = line.split()
# owned process
if not items[0] in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
continue
# look for python
if re.search('python',line) == None:
continue
# PID
pid = items[1]
# start time
timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# kill old process
if startTime < timeLimit:
_logger.debug("old process : %s %s" % (pid,startTime))
_logger.debug(line)
commands.getoutput('kill -9 %s' % pid)
except:
errtype,errvalue = sys.exc_info()[:2]
_logger.error("kill process : %s %s" % (errtype,errvalue))
# main loop
main()
| apache-2.0 |
gpldecha/gym-square | gym_square/test/test_continuous_state.py | 1 | 1192 | import unittest
import gym
from gym_square.envs.square_continuous_state_env import SquareContinuousStateEnv
from time import sleep
import numpy as np
import matplotlib.cm as cmx
class TestSquareContinuousStateEnv(unittest.TestCase):
def test_continuous_state_env(self):
print('=== Test SquareContinuousStateEnv ===')
env = SquareContinuousStateEnv()
env.reset()
cm = cmx.get_cmap('brg')
env.square_world.reward.set_color_map(cm)
env.square_world.set_agent_state([0.5, 0.5])
bRender = False
if bRender: env.render()
for _ in range(5):
if bRender: env.render()
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
print('act: ', action)
print('obs: ', observation)
print('rew: ', reward)
print('done: ', done)
print(' ')
if done:
print('Episode Finished')
break
if bRender:
sleep(1)
else:
sleep(0.01)
return True
if __name__ == '__main__':
unittest.main()
| mit |
newemailjdm/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
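# Illustrative sketch (not part of the original source): passing t='cutoff'
# returns the half-width at which the envelope drops to `tpr` dB, which is a
# convenient way to build a time axis just wide enough for the pulse. The
# fc/bw/tpr values below are arbitrary; the helper is never called here.
def _gausspulse_cutoff_sketch():
    t_cut = gausspulse('cutoff', fc=5.0, bw=0.5, tpr=-40)   # half-width in seconds
    t = np.linspace(-t_cut, t_cut, 200)
    return gausspulse(t, fc=5.0, bw=0.5)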
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
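# Illustrative sketch (not part of the original source): the same frequency
# endpoints swept with three of the supported methods. The frequencies and
# duration are arbitrary; the helper is never called here.
def _chirp_methods_sketch():
    t = np.linspace(0, 10, 5001)
    w_lin = chirp(t, f0=6, f1=1, t1=10, method='linear')
    w_log = chirp(t, f0=6, f1=1, t1=10, method='logarithmic')
    w_hyp = chirp(t, f0=6, f1=1, t1=10, method='hyperbolic')
    return w_lin, w_log, w_hyp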
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
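# Illustrative sketch (not part of the original source): a cubic frequency
# schedule f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2.0. The coefficients are
# arbitrary; the helper is never called here.
def _sweep_poly_sketch():
    p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    t = np.linspace(0, 10, 5001)
    return sweep_poly(t, p)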
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/model_selection/_validation.py | 9 | 38821 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..utils.metaestimators import _safe_split
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import check_cv
from ..preprocessing import LabelEncoder
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, groups))
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, groups=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
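# Illustrative sketch (not part of the original module): requesting
# out-of-fold class probabilities instead of hard predictions. The dataset
# and estimator are arbitrary choices; the helper is never called here.
def _cross_val_predict_proba_sketch():
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    # result has shape (n_samples, n_classes); columns follow sorted labels
    return cross_val_predict(LogisticRegression(), iris.data, iris.target,
                             cv=5, method='predict_proba')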
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
predictions_ = np.zeros((X_test.shape[0], n_classes))
if method == 'decision_function' and len(estimator.classes_) == 2:
predictions_[:, estimator.classes_[-1]] = predictions
else:
predictions_[:, estimator.classes_] = predictions
predictions = predictions_
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, groups=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Labels to constrain permutation within groups, i.e. ``y`` values
are permuted among samples with the same group identifier.
When not specified, ``y`` values are permuted among all samples.
When a grouped cross-validator is used, the group labels are
also passed on to the ``split`` method of the cross-validator. The
cross-validator uses them for grouping the samples while splitting
the dataset into train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The p-value, which approximates the probability that the score would
be obtained by chance. This is calculated as:
`(C + 1) / (n_permutations + 1)`
Where C is the number of permutations whose score >= the true score.
The best possible p-value is 1/(n_permutations + 1), the worst is 1.0.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, groups, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, groups, random_state),
groups, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
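# Illustrative sketch (not part of the original module): estimating how likely
# the cross-validated accuracy of a linear SVM on iris is to arise by chance.
# Dataset, estimator and n_permutations are arbitrary choices; the helper is
# never called here.
def _permutation_test_score_sketch():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=100)
    return score, perm_scores, pvalue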
def _permutation_test_score(estimator, X, y, groups, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, groups):
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
estimator.fit(X_train, y_train)
avg_score.append(scorer(estimator, X_test, y_test))
return np.mean(avg_score)
def _shuffle(y, groups, random_state):
"""Return a shuffled copy of y eventually shuffle among same groups."""
if groups is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(groups))
for group in np.unique(groups):
this_mask = (groups == group)
indices[this_mask] = random_state.permutation(indices[this_mask])
return safe_indexing(y, indices)
def learning_curve(estimator, X, y, groups=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0, shuffle=False,
random_state=None):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be large enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
shuffle : boolean, optional
Whether to shuffle training data before taking prefixes of it
        based on ``train_sizes``.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` is True.
    Returns
    -------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<sphx_glr_auto_examples_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Store it as list as we will be iterating over the list multiple times
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if shuffle:
rng = check_random_state(random_state)
cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv_iter)
else:
train_test_proportions = []
for train, test in cv_iter:
for n_train_samples in train_sizes_abs:
train_test_proportions.append((train[:n_train_samples], test))
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train, test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in train_test_proportions)
out = np.array(out)
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
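# Illustrative sketch (not part of the original module): scores for five
# training-set sizes between 10% and 100% of the available training data.
# The estimator and dataset are arbitrary choices; the helper is never
# called here.
def _learning_curve_sketch():
    from sklearn.datasets import load_iris
    from sklearn.naive_bayes import GaussianNB
    iris = load_iris()
    sizes, train_scores, test_scores = learning_curve(
        GaussianNB(), iris.data, iris.target, cv=5,
        train_sizes=np.linspace(0.1, 1.0, 5))
    # train_scores/test_scores have shape (n_unique_ticks, n_cv_folds)
    return sizes, train_scores.mean(axis=1), test_scores.mean(axis=1)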
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, groups=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`sphx_glr_auto_examples_model_selection_plot_validation_curve.py`
"""
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
# NOTE do not change order of iteration to allow one time cv splitters
for train, test in cv.split(X, y, groups) for v in param_range)
out = np.asarray(out)
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
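# Illustrative sketch (not part of the original module): scoring an SVC over
# five values of ``gamma``. Parameter range, estimator and dataset are
# arbitrary choices; the helper is never called here.
def _validation_curve_sketch():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    param_range = np.logspace(-6, -1, 5)
    train_scores, test_scores = validation_curve(
        SVC(), iris.data, iris.target, param_name="gamma",
        param_range=param_range, cv=3)
    # both arrays have shape (len(param_range), n_cv_folds)
    return param_range, train_scores, test_scores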
| bsd-3-clause |
jereze/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
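# Illustrative sketch (not part of the original test suite): the typical
# reliability-diagram workflow that these tests exercise -- calibrate a
# classifier, then bin its predicted probabilities with calibration_curve.
# The dataset sizes, split and n_bins are arbitrary choices; the leading
# underscore keeps it from being collected as a test.
def _calibration_reliability_sketch():
    X, y = make_classification(n_samples=200, n_features=6, random_state=42)
    clf = CalibratedClassifierCV(LinearSVC(random_state=42), cv=3,
                                 method='sigmoid')
    clf.fit(X[:100], y[:100])
    prob_pos = clf.predict_proba(X[100:])[:, 1]
    return calibration_curve(y[100:], prob_pos, n_bins=5)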
| bsd-3-clause |
LangmuirSim/langmuir | LangmuirPython/plot/plotConv.py | 2 | 6004 | # -*- coding: utf-8 -*-
"""
@author: adam
"""
import matplotlib.pyplot as plt
import langmuir as lm
import numpy as np
import argparse
import os
desc = """
Plot output from rdf.py
"""
def get_arguments(args=None):
parser = argparse.ArgumentParser()
parser.description = desc
parser.add_argument(dest='ifile', default='conv.pkl', type=str, nargs='?',
metavar='input', help='input file')
parser.add_argument('--stub', default='', type=str, metavar='stub',
help='output file stub', required=True)
parser.add_argument('--ext', default='pdf', type=str, metavar='str',
choices=['png', 'pdf', 'jpg'], help='output file type')
parser.add_argument('--figure', default=(6.0, 6.0, 1.5, 1.0, 1.0, 1.5),
help='figure dimensions (w, h, l, r, t, b)')
parser.add_argument('--c0', action='store_true', help='x')
parser.add_argument('--c1', action='store_true', help='y')
parser.add_argument('--c2', action='store_true', help='corr')
parser.add_argument('--c3', action='store_true', help='conv')
parser.add_argument('--all', action='store_true', help='all')
parser.add_argument('--title' , default='')
parser.add_argument('--xlabel' , default='$\Delta r$')
parser.add_argument('--ylabel' , default='')
parser.add_argument('--xlim', default=(0, None), nargs=2, type=float)
parser.add_argument('--xmax', default=None, type=float)
parser.add_argument('--xmin', default=None, type=float)
parser.add_argument('--ylim', default=(-1.1, 1.1), nargs=2, type=float)
parser.add_argument('--ymax', default=None, type=float)
parser.add_argument('--ymin', default=None, type=float)
parser.add_argument('--xmult', default=None, type=float,
help='xtick multiple')
parser.add_argument('--ymult', default=None, type=float,
help='ytick multiple')
parser.add_argument('--xmaxn', default=6, type=int, help='xtick maxn')
parser.add_argument('--ymaxn', default=6, type=int, help='ytick maxn')
parser.add_argument('--fontsize' , default='large')
parser.add_argument('--labelsize', default=None)
parser.add_argument('--ticksize' , default=None)
parser.add_argument('--titlesize', default=None)
parser.add_argument('--legend', action='store_true', help='show legend')
parser.add_argument('--legendsize', default='xx-small')
parser.add_argument('--loc', default='best')
parser.add_argument('--show', action='store_true', help='show plot')
parser.add_argument('--save', action='store_true', help='save plot')
opts = parser.parse_args(args)
if opts.titlesize is None: opts.titlesize = opts.fontsize
if opts.labelsize is None: opts.labelsize = opts.fontsize
if opts.ticksize is None: opts.ticksize = opts.fontsize
if opts.xmin: opts.xlim[0] = opts.xmin
if opts.xmax: opts.xlim[1] = opts.xmax
if opts.ymin: opts.ylim[0] = opts.ymin
if opts.ymax: opts.ylim[1] = opts.ymax
opts.xmin, opts.xmax = opts.xlim
opts.ymin, opts.ymax = opts.ylim
if opts.all:
opts.c0 = True
opts.c1 = True
opts.c2 = True
opts.c3 = True
popts = [opts.c0, opts.c1, opts.c2, opts.c3]
if not any(popts):
        raise RuntimeError, 'must use --c0, --c1, etc to choose plot type'
if not opts.show and not opts.save:
opts.show = True
return opts
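# Illustrative command line for this script (file names are hypothetical; the
# only required option is --stub, everything else falls back to the defaults
# defined in get_arguments above):
#   python plotConv.py conv.pkl --stub conv --all --ext pdf --save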
def calculate_bins(array):
bwidth = 1.0
brange = [0, int(np.amax(array))]
if brange[1] % 2 == 0:
pass
else:
brange[1] += 1
bins = int(brange[1] / bwidth)
return bwidth, brange, bins
if __name__ == '__main__':
work = os.getcwd()
opts = get_arguments()
data = lm.common.load_pkl(opts.ifile)
corr = data['corr']
conv = data['conv']
x, y = data['x'], data['y']
grid = lm.grid.Grid(*x.shape)
mesh = lm.grid.PrecalculatedMesh(grid)
bwidth, brange, bins = calculate_bins(mesh.r1)
kwargs = dict(bins=bins, range=brange)
counts_c0, edges = np.histogram(mesh.r1.flat, weights=x.flat, **kwargs)
counts_c1, edges = np.histogram(mesh.r1.flat, weights=y.flat, **kwargs)
counts_c2, edges = np.histogram(mesh.r1.flat, weights=corr.flat, **kwargs)
counts_c3, edges = np.histogram(mesh.r1.flat, weights=conv.flat, **kwargs)
edges = 0.5*(edges[1:] + edges[:-1])
counts_c0 = lm.surface.linear_mapping(counts_c0, -1, 1)
counts_c1 = lm.surface.linear_mapping(counts_c1, -1, 1)
counts_c2 = lm.surface.linear_mapping(counts_c2, -1, 1)
counts_c3 = lm.surface.linear_mapping(counts_c3, -1, 1)
fig, ax1 = lm.plot.subplots(1, 1, *opts.figure)
lm.plot.title(opts.title, fontsize=opts.titlesize)
plt.xlabel(opts.xlabel, size=opts.labelsize)
plt.ylabel(opts.ylabel, size=opts.labelsize)
plt.tick_params(labelsize=opts.ticksize)
if opts.c0:
plt.plot(edges, counts_c0, color=lm.plot.colors.r1, lw=2, label='x')
if opts.c1:
plt.plot(edges, counts_c1, color=lm.plot.colors.b1, lw=2, label='y')
if opts.c2:
plt.plot(edges, counts_c2, color=lm.plot.colors.g2, lw=2, label='corr')
if opts.c3:
plt.plot(edges, counts_c3, color=lm.plot.colors.o1, lw=2, label='conv')
if opts.xmax is None:
opts.xmax = np.amax(mesh.r1.flat)
opts.xlim = (opts.xmin, opts.xmax)
plt.ylim(*opts.ylim)
plt.xlim(*opts.xlim)
lm.plot.maxn_locator(x=opts.xmaxn)
lm.plot.maxn_locator(y=opts.ymaxn)
if opts.xmult: lm.plot.multiple_locator(x=opts.xmult)
if opts.ymult: lm.plot.multiple_locator(y=opts.ymult)
ax1.xaxis.get_offset_text().set_size(opts.ticksize)
ax1.yaxis.get_offset_text().set_size(opts.ticksize)
if opts.legend:
plt.legend(loc=opts.loc, fontsize=opts.legendsize, frameon=False)
if opts.save:
handle = lm.common.format_output(stub=opts.stub, name='',
ext=opts.ext)
print 'saved: %s' % handle
lm.plot.save(handle)
if opts.show:
plt.show() | gpl-2.0 |
datacommonsorg/api-python | datacommons_pandas/__init__.py | 1 | 1715 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datacommons_pandas.df_builder import build_time_series, build_time_series_dataframe, build_multivariate_dataframe
################################ SYMLINK FILES ################################
# We include symlinks to all user-facing functions from the datacommons pkg. #
# This is so that users do not need to import both libraries for pd support. #
# Please keep the below in sync with the __init__.py in the datacommons/ dir #
# TODO: enforce this. https://github.com/datacommonsorg/api-python/issues/149 #
################################################################################
# Data Commons SPARQL query support
from datacommons_pandas.query import query
# Data Commons Python API
from datacommons_pandas.core import get_property_labels, get_property_values, get_triples
from datacommons_pandas.places import get_places_in, get_related_places, get_stats
from datacommons_pandas.populations import get_populations, get_observations, get_pop_obs, get_place_obs
from datacommons_pandas.stat_vars import get_stat_value, get_stat_series, get_stat_all
# Other utilities
from datacommons_pandas.utils import set_api_key
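# Minimal usage sketch (illustrative only; the DCIDs "geoId/06" and
# "Count_Person" and the API-key step are assumptions, not defined here):
#   import datacommons_pandas as dcpd
#   dcpd.set_api_key("YOUR_API_KEY")
#   df = dcpd.build_time_series_dataframe(["geoId/06"], "Count_Person")
# Thanks to the symlinked imports above, the core datacommons helpers
# (e.g. get_property_values, query) are reachable from this package as well.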
| apache-2.0 |
Madhuka/incubator-zeppelin | python/src/main/resources/bootstrap.py | 1 | 5836 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PYTHON 2 / 3 compatibility :
# bootstrap.py must be runnable with Python 2 or 3
# Remove interactive mode displayhook
import sys
import signal
try:
import StringIO as io
except ImportError:
import io as io
sys.displayhook = lambda x: None
def intHandler(signum, frame): # Set the signal handler
print ("Paragraph interrupted")
raise KeyboardInterrupt()
signal.signal(signal.SIGINT, intHandler)
def help():
print("""%html
<h2>Python Interpreter help</h2>
<h3>Python 2 & 3 compatibility</h3>
<p>The interpreter is compatible with Python 2 & 3.<br/>
To change Python version,
change in the interpreter configuration the python to the
desired version (example : python=/usr/bin/python3)</p>
<h3>Python modules</h3>
<p>The interpreter can use all modules already installed
(with pip, easy_install, etc)</p>
<h3>Forms</h3>
You must install py4j in order to use
the form feature (pip install py4j)
<h4>Input form</h4>
<pre>print (z.input("f1","defaultValue"))</pre>
<h4>Selection form</h4>
<pre>print(z.select("f2", [("o1","1"), ("o2","2")],2))</pre>
<h4>Checkbox form</h4>
<pre> print("".join(z.checkbox("f3", [("o1","1"), ("o2","2")],["1"])))</pre>
<h3>Matplotlib graph</h3>
<div>The interpreter can display matplotlib graph with
the function z.show()</div>
<div> You need to already have matplotlib module installed
to use this functionality !</div><br/>
<pre>import matplotlib.pyplot as plt
plt.figure()
(.. ..)
z.show(plt)
plt.close()
</pre>
<div><br/> z.show function can take optional parameters
to adapt graph width and height</div>
<div><b>example </b>:
<pre>z.show(plt,width='50px')
z.show(plt,height='150px') </pre></div>
<h3>Pandas DataFrame</h3>
<div> You need to have Pandas module installed
to use this functionality (pip install pandas) !</div><br/>
<div>The interpreter can visualize Pandas DataFrame
with the function z.show()
<pre>
import pandas as pd
df = pd.read_csv("bank.csv", sep=";")
z.show(df)
</pre></div>
<h3>SQL over Pandas DataFrame</h3>
<div> You need to have Pandas&Pandasql modules installed
to use this functionality (pip install pandas pandasql) !</div><br/>
<div>Python interpreter group includes %sql interpreter that can query
Pandas DataFrames using SQL and visualize results using Zeppelin Table Display System
<pre>
%python
import pandas as pd
df = pd.read_csv("bank.csv", sep=";")
</pre>
<br />
<pre>
%python.sql
%sql
SELECT * from df LIMIT 5
</pre>
</div>
""")
class PyZeppelinContext(object):
""" If py4j is detected, these class will be override
with the implementation in bootstrap_input.py
"""
errorMsg = "You must install py4j Python module " \
"(pip install py4j) to use Zeppelin dynamic forms features"
def __init__(self):
self.max_result = 1000
def input(self, name, defaultValue=""):
print(self.errorMsg)
def select(self, name, options, defaultValue=""):
print(self.errorMsg)
def checkbox(self, name, options, defaultChecked=[]):
print(self.errorMsg)
def show(self, p, **kwargs):
if hasattr(p, '__name__') and p.__name__ == "matplotlib.pyplot":
self.show_matplotlib(p, **kwargs)
elif type(p).__name__ == "DataFrame": # does not play well with sub-classes
# `isinstance(p, DataFrame)` would req `import pandas.core.frame.DataFrame`
# and so a dependency on pandas
self.show_dataframe(p, **kwargs)
elif hasattr(p, '__call__'):
p() #error reporting
def show_dataframe(self, df, **kwargs):
"""Pretty prints DF using Table Display System
"""
limit = len(df) > self.max_result
header_buf = io.StringIO("")
header_buf.write(str(df.columns[0]))
for col in df.columns[1:]:
header_buf.write("\t")
header_buf.write(str(col))
header_buf.write("\n")
body_buf = io.StringIO("")
rows = df.head(self.max_result).values if limit else df.values
for row in rows:
body_buf.write(str(row[0]))
for cell in row[1:]:
body_buf.write("\t")
body_buf.write(str(cell))
body_buf.write("\n")
body_buf.seek(0); header_buf.seek(0)
#TODO(bzz): fix it, so it shows red notice, as in Spark
print("%table " + header_buf.read() + body_buf.read()) # +
# ("\n<font color=red>Results are limited by {}.</font>" \
# .format(self.max_result) if limit else "")
#)
body_buf.close(); header_buf.close()
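    # Illustrative output for a small two-column frame (a sketch, not from a
    # real run): the paragraph starts with "%table", columns are tab-separated
    # and rows newline-separated, e.g.
    #   %table name\tage
    #   alice\t30
    #   bob\t25
    # Zeppelin's table display system renders such output as a table.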
def show_matplotlib(self, p, width="100%", height="100%", **kwargs):
"""Matplotlib show function
"""
img = io.StringIO()
p.savefig(img, format="svg")
html = "%html <div style='width:{width};height:{height}'>{image}<div>"
print(html.format(width=width, height=height, image=img.getvalue()))
img.close()
z = PyZeppelinContext()
| apache-2.0 |
iagapov/ocelot | demos/sr/k_diode.py | 2 | 2071 | __author__ = 'Sergey Tomin'
from ocelot.rad import *
from ocelot import *
from ocelot.gui import *
import numpy as np
import time
font = {'size' : 14}
matplotlib.rc('font', **font)
#from scipy.optimize import curve_fit
from ocelot.demos.sr.k_analysis import *
#from ocelot.lib.genera.src.python.radiation import generaSR
font = {'size' : 14}
matplotlib.rc('font', **font)
beam = Beam()
beam.E = 17.5
beam.I = 0.1
und = Undulator(Kx = 4., nperiods = 125, lperiod=0.04, eid= "und")
lat = MagneticLattice((und))
screen = Screen()
screen.z = 500.0
screen.size_x = 0.
screen.size_y = 0.
screen.nx = 1
screen.ny = 1
screen.start_energy = 7950 #eV
screen.end_energy = 8200 #eV
screen.num_energy = 1000
screen = calculate_radiation(lat, screen, beam)
show_flux(screen, unit="mrad")
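# The block above scans photon energy at a single on-axis screen point
# (nx = ny = 1, 1000 energy samples between 7950 and 8200 eV), i.e. it plots
# the on-axis undulator spectrum; the K-mono scan below uses a finite
# 51 x 51 point screen and jitters the electron-beam energy shot by shot.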
# K-mono scan
beam_energy = 17.5 # GeV
b_energy_jit = 1e-4 # dE/E
screen = Screen()
screen.z = 500.0
screen.size_x = 0.01
screen.size_y = 0.01
screen.nx = 51
screen.ny = 51
ds = screen.size_x/screen.nx*screen.size_y/screen.ny
n_scan_points = 30
n_shots = 5
scan_Kmono_energy = np.linspace(start=8000, stop=8150, num=n_scan_points)
start = time.time()
flux = []
Etotal = []
for n, eph in enumerate(scan_Kmono_energy):
print(n, "/", n_scan_points)
for i in range(n_shots):
beam.E = np.random.normal(beam_energy, beam_energy*b_energy_jit, 1)
print("beam energy: ", beam.E)
screen.start_energy = eph # 8078.2 - 50 + i*100/30. #eV
screen.num_energy = 1
screen = calculate_radiation(lat, screen, beam)
flux.append(sum(screen.Total)*ds)
Etotal.append(eph)
print("time cpp = ", time.time() - start)
e_fin, polynom = data_analysis(Etotal, flux=flux, method="least")
print("Eph_fin = ", e_fin)
x = np.linspace(Etotal[0], Etotal[-1], num=100)
plt.plot(Etotal, flux, "r.", lw =2, label="exp data")
plt.plot(x, polynom(x), "b", label="fit func")
plt.plot(e_fin, polynom(e_fin), "go", lw = 3, label=r"$E_{ph}=$" + str(np.around(e_fin, decimals=2)))
plt.xlabel(r"$E_{ph}$, eV")
plt.grid(True)
plt.legend()
plt.show()
| gpl-3.0 |
phobson/statsmodels | statsmodels/graphics/factorplots.py | 28 | 7596 | # -*- coding: utf-8 -*-
"""
Authors: Josef Perktold, Skipper Seabold, Denis A. Engemann
"""
from statsmodels.compat.python import get_function_name, iterkeys, lrange, zip, iteritems
import numpy as np
from statsmodels.graphics.plottools import rainbow
import statsmodels.graphics.utils as utils
def interaction_plot(x, trace, response, func=np.mean, ax=None, plottype='b',
xlabel=None, ylabel=None, colors=[], markers=[],
linestyles=[], legendloc='best', legendtitle=None,
**kwargs):
"""
Interaction plot for factor level statistics.
    Note. If categorical factors are supplied, levels will be internally
    recoded to integers. This ensures matplotlib compatibility.
uses pandas.DataFrame to calculate an `aggregate` statistic for each
level of the factor or group given by `trace`.
Parameters
----------
x : array-like
The `x` factor levels constitute the x-axis. If a `pandas.Series` is
given its name will be used in `xlabel` if `xlabel` is None.
trace : array-like
The `trace` factor levels will be drawn as lines in the plot.
If `trace` is a `pandas.Series` its name will be used as the
`legendtitle` if `legendtitle` is None.
response : array-like
        The response or dependent variable. If a `pandas.Series` is given
its name will be used in `ylabel` if `ylabel` is None.
func : function
Anything accepted by `pandas.DataFrame.aggregate`. This is applied to
the response variable grouped by the trace levels.
plottype : str {'line', 'scatter', 'both'}, optional
The type of plot to return. Can be 'l', 's', or 'b'
ax : axes, optional
Matplotlib axes instance
xlabel : str, optional
Label to use for `x`. Default is 'X'. If `x` is a `pandas.Series` it
will use the series names.
ylabel : str, optional
Label to use for `response`. Default is 'func of response'. If
`response` is a `pandas.Series` it will use the series names.
colors : list, optional
If given, must have length == number of levels in trace.
linestyles : list, optional
If given, must have length == number of levels in trace.
markers : list, optional
        If given, must have length == number of levels in trace
kwargs
These will be passed to the plot command used either plot or scatter.
If you want to control the overall plotting options, use kwargs.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> weight = np.random.randint(1,4,size=60)
>>> duration = np.random.randint(1,3,size=60)
>>> days = np.log(np.random.randint(1,30, size=60))
>>> fig = interaction_plot(weight, duration, days,
... colors=['red','blue'], markers=['D','^'], ms=10)
>>> import matplotlib.pyplot as plt
>>> plt.show()
.. plot::
import numpy as np
from statsmodels.graphics.factorplots import interaction_plot
np.random.seed(12345)
weight = np.random.randint(1,4,size=60)
duration = np.random.randint(1,3,size=60)
days = np.log(np.random.randint(1,30, size=60))
fig = interaction_plot(weight, duration, days,
colors=['red','blue'], markers=['D','^'], ms=10)
import matplotlib.pyplot as plt
#plt.show()
"""
from pandas import DataFrame
fig, ax = utils.create_mpl_ax(ax)
response_name = ylabel or getattr(response, 'name', 'response')
ylabel = '%s of %s' % (get_function_name(func), response_name)
xlabel = xlabel or getattr(x, 'name', 'X')
legendtitle = legendtitle or getattr(trace, 'name', 'Trace')
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
x_values = x_levels = None
if isinstance(x[0], str):
x_levels = [l for l in np.unique(x)]
x_values = lrange(len(x_levels))
x = _recode(x, dict(zip(x_levels, x_values)))
data = DataFrame(dict(x=x, trace=trace, response=response))
plot_data = data.groupby(['trace', 'x']).aggregate(func).reset_index()
# return data
# check plot args
n_trace = len(plot_data['trace'].unique())
if linestyles:
try:
assert len(linestyles) == n_trace
except AssertionError as err:
raise ValueError("Must be a linestyle for each trace level")
else: # set a default
linestyles = ['-'] * n_trace
if markers:
try:
assert len(markers) == n_trace
except AssertionError as err:
            raise ValueError("Must be a marker for each trace level")
else: # set a default
markers = ['.'] * n_trace
if colors:
try:
assert len(colors) == n_trace
except AssertionError as err:
            raise ValueError("Must be a color for each trace level")
else: # set a default
#TODO: how to get n_trace different colors?
colors = rainbow(n_trace)
if plottype == 'both' or plottype == 'b':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.plot(group['x'], group['response'], color=colors[i],
marker=markers[i], label=label,
linestyle=linestyles[i], **kwargs)
elif plottype == 'line' or plottype == 'l':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.plot(group['x'], group['response'], color=colors[i],
label=label, linestyle=linestyles[i], **kwargs)
elif plottype == 'scatter' or plottype == 's':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.scatter(group['x'], group['response'], color=colors[i],
label=label, marker=markers[i], **kwargs)
else:
raise ValueError("Plot type %s not understood" % plottype)
ax.legend(loc=legendloc, title=legendtitle)
ax.margins(.1)
if all([x_levels, x_values]):
ax.set_xticks(x_values)
ax.set_xticklabels(x_levels)
return fig
def _recode(x, levels):
""" Recode categorial data to int factor.
Parameters
----------
x : array-like
        array-like object supporting numpy array methods, containing
        categorically coded data.
levels : dict
mapping of labels to integer-codings
Returns
-------
out : instance numpy.ndarray
"""
from pandas import Series
name = None
if isinstance(x, Series):
name = x.name
x = x.values
if x.dtype.type not in [np.str_, np.object_]:
        raise ValueError('This is not a categorical factor.'
' Array of str type required.')
elif not isinstance(levels, dict):
raise ValueError('This is not a valid value for levels.'
' Dict required.')
elif not (np.unique(x) == np.unique(list(iterkeys(levels)))).all():
raise ValueError('The levels do not match the array values.')
else:
out = np.empty(x.shape[0], dtype=np.int)
for level, coding in iteritems(levels):
out[x == level] = coding
if name:
out = Series(out)
out.name = name
return out
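# Illustrative example of _recode (a sketch, not part of the public API):
#   >>> _recode(np.array(['low', 'high', 'low'], dtype=object),
#   ...         {'low': 0, 'high': 1})
#   array([0, 1, 0])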
| bsd-3-clause |
jelee218/2017sejongAI | Week10/category_predictor.py | 1 | 1551 | from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
category_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos',
'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics',
'sci.med': 'Medicine'}
training_data = fetch_20newsgroups(subset='train',
categories=category_map.keys(), shuffle=True, random_state=5)
count_vectorizer = CountVectorizer()
train_tc = count_vectorizer.fit_transform(training_data.data)
print("\nDimensions of training data:", train_tc.shape)
tfidf = TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_tc)
input_data = [
"Mr. and Mrs. Dursley, of number four, Privet Drive, were proud to say that they were perfectly normal, thank you very much.",
"They were the last people you'd expect to be involved in anything strange or mysterious, because they just didn't hold with such nonsense.",
"Mr. Dursley was the director of a firm called Grunnings, which madedrills.",
"He was a big, beefy man with hardly any neck, although he did have a very large mustache."
]
classifier = MultinomialNB().fit(train_tfidf, training_data.target)
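# Note: the input sentences are transformed with the *same* fitted
# count_vectorizer and tfidf objects (transform, not fit_transform), so they
# land in the feature space the classifier was trained on.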
input_tc = count_vectorizer.transform(input_data)
input_tfidf = tfidf.transform(input_tc)
predictions = classifier.predict(input_tfidf)
for sent, category in zip(input_data, predictions):
print('\nInput:', sent, '\nPredicted category:', \
category_map[training_data.target_names[category]])
| gpl-3.0 |
liebermeister/flux-enzyme-cost-minimization | scripts/monod_surface.py | 1 | 36182 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 29 14:29:21 2016
A script for drawing the Phase Plane plot for the
glucose/oxygen levels (for the 6 selected EFMs)
@author: noore
"""
import os
import pandas as pd
import numpy as np
from matplotlib import rcParams, cm, pyplot, colors, colorbar
import zipfile
import definitions as D
from prepare_data import read_sweep_zipfile, get_df_from_sweep_zipfile, \
get_general_parameters_from_zipfile
from scipy.interpolate import RectBivariateSpline
import itertools
import seaborn as sns
PREFIX = 'n39-p'
ITER_RANGE = range(31, 48)
REGEX = 'mext-glucoseExt-'
class SweepInterpolator(object):
def __init__(self, data_df, efm_list):
self.efm_list = efm_list or data_df['efm'].unique().tolist()
# interpolate 2D function for each EFM
self.f_interp_dict = {}
max_growth_rate = 0
for efm in self.efm_list:
try:
gr_df = data_df[data_df['efm'] == efm].pivot(index=D.GLU_COL,
columns=D.OX_COL,
values=D.GROWTH_RATE_L)
f = RectBivariateSpline(np.log10(gr_df.index),
np.log10(gr_df.columns),
gr_df)
self.f_interp_dict[efm] = f
max_gr_efm = f(np.log10(D.MAX_CONC['glucoseExt']),
np.log10(D.MAX_CONC['oxygen']))[0, 0]
max_growth_rate = max(max_growth_rate, max_gr_efm)
except ValueError:
print("WARNING: cannot interpolate 2D function for EFM #%04d" % efm)
def calc_gr(self, efm, glucose, oxygen):
return self.f_interp_dict[efm](np.log10(glucose), np.log10(oxygen))[0, 0]
def calc_all_gr(self, glucose, oxygen):
data = [self.calc_gr(efm, glucose, oxygen) for efm in self.efm_list]
return pd.Series(index=self.efm_list, data=data)
@staticmethod
def get_general_params(iter_num):
prefix = '%s%d' % (PREFIX, iter_num)
zip_fname = os.path.join(D.DATA_DIR, '%s.zip' % prefix)
# get the oxygen level from the "metfixed.csv" file inside the zipfile
with zipfile.ZipFile(zip_fname, 'r') as z:
rates_df, params_df, km_df = \
get_general_parameters_from_zipfile(z, prefix)
return rates_df, params_df, km_df
@staticmethod
def get_efm_list_for_knockout(ko):
"""
get the descriptions of the EFMs from one of the sweep
files (doesn't matter which)
"""
rates_df, _, _ = SweepInterpolator.get_general_params(min(ITER_RANGE))
if type(ko) != list:
ko = [ko]
efms_to_keep = rates_df[~rates_df[ko].any(1)].index
return list(efms_to_keep)
def calculate_growth_on_grid(self, ko=None, N=200):
"""
use the interpolations to calculate the growth rate on a NxN grid
"""
glu_grid = np.logspace(np.log10(D.MIN_CONC['glucoseExt']),
np.log10(D.MAX_CONC['glucoseExt']), N)
ox_grid = np.logspace(np.log10(D.MIN_CONC['oxygen']),
np.log10(D.MAX_CONC['oxygen']), N)
if ko:
efms = SweepInterpolator.get_efm_list_for_knockout(ko)
else:
efms = self.efm_list
monod_df = []
for g, o in itertools.product(glu_grid, ox_grid):
growth_rates = [(self.calc_gr(efm, g, o), efm) for efm in efms]
growth_rates.sort(reverse=True)
best_gr, best_efm = growth_rates[0]
monod_df.append((g, o, best_efm, best_gr))
monod_df = pd.DataFrame(monod_df,
columns=[D.GLU_COL, D.OX_COL, 'best_efm', D.GROWTH_RATE_L])
return monod_df
@staticmethod
def interpolate_2D_sweep(efm_list=None):
"""
Combine all glucose and oxygen sweeps into one DataFrame
"""
data_df = get_complete_sweep_data()
return SweepInterpolator(data_df, efm_list)
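# Illustrative use of the interpolator (the EFM id 1565 is an arbitrary
# example; any id present in the sweep data works):
#   interp = SweepInterpolator.interpolate_2D_sweep()
#   gr_one = interp.calc_gr(1565, glucose=100.0, oxygen=0.21)  # one EFM
#   gr_all = interp.calc_all_gr(glucose=100.0, oxygen=0.21)    # Series over EFMs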
def get_raw_sweep_data(iter_num):
prefix = '%s%d' % (PREFIX, iter_num)
zip_fname = os.path.join(D.DATA_DIR, '%s.zip' % prefix)
# get the oxygen level from the "metfixed.csv" file inside the zipfile
with zipfile.ZipFile(zip_fname, 'r') as z:
ox_df = pd.read_csv(z.open('%s/metfixed.csv' % prefix, 'r'),
header=None, index_col=0)
ox_conc = ox_df.at['oxygen', 1] # in mM
_, df = get_df_from_sweep_zipfile(zip_fname, REGEX)
df.rename(columns={REGEX: D.GLU_COL}, inplace=True)
df.insert(2, D.OX_COL, float(ox_conc))
return df
def get_sweep_data(iter_num):
prefix = '%s%d' % (PREFIX, iter_num)
zip_fname = os.path.join(D.DATA_DIR, '%s.zip' % prefix)
# get the oxygen level from the "metfixed.csv" file inside the zipfile
with zipfile.ZipFile(zip_fname, 'r') as z:
ox_df = pd.read_csv(z.open('%s/metfixed.csv' % prefix, 'r'),
header=None, index_col=0)
ox_conc = ox_df.at['oxygen', 1] # in mM
df = read_sweep_zipfile(zip_fname, REGEX)
df = pd.melt(df.reset_index(), id_vars='efm', value_name=D.GROWTH_RATE_L)
df.rename(columns={REGEX: D.GLU_COL}, inplace=True)
df.insert(2, D.OX_COL, float(ox_conc))
return df
def cache_complete_sweep_data():
df_list = []
for iter_num in ITER_RANGE:
df_list.append(get_sweep_data(iter_num))
data_df = pd.concat(df_list)
data_df.sort_values(['efm', D.GLU_COL, D.OX_COL], inplace=True)
data_df = data_df[['efm', D.GLU_COL, D.OX_COL, D.GROWTH_RATE_L]]
data_df[D.GLU_COL] = pd.to_numeric(data_df[D.GLU_COL])
data_df[D.OX_COL] = pd.to_numeric(data_df[D.OX_COL])
data_df.to_csv(os.path.join(D.TEMP_DIR, 'sweep2d_gr.csv'))
def get_complete_sweep_data():
sweep_cache_fname = os.path.join(D.TEMP_DIR, 'sweep2d_gr.csv')
if not os.path.exists(sweep_cache_fname):
cache_complete_sweep_data()
return pd.read_csv(sweep_cache_fname)
def get_winning_enzyme_allocations():
df_list = []
for iter_num in ITER_RANGE:
df_std_ox = get_raw_sweep_data(iter_num)
df_std_ox_gr = get_sweep_data(iter_num)
# find the winning EFM in each condition (glucose level)
winning = df_std_ox_gr.sort_values(D.GROWTH_RATE_L, ascending=False).groupby(D.GLU_COL).first().reset_index()
# merge the winning table with the enzyme data table, so that only the
# enzyme allocation data for the winning EFM in each condition is kept
win_enz_df = pd.merge(df_std_ox, winning, on=['efm', D.GLU_COL, D.OX_COL], how='inner')
df_list.append(win_enz_df)
df = pd.concat(df_list)
return df
def write_cache_files():
"""
write all relevant cache files
"""
# 1) the growth rates for each triplet: EFM, glucose, oxygen
cache_complete_sweep_data()
# 2) for each glucose and oxygen pair, find the EFM with the maximal
# growth rate, and keep only its enzyme allocation values
sweep2d_win_enzymes = get_winning_enzyme_allocations()
sweep2d_win_enzymes.to_csv(os.path.join(D.TEMP_DIR, 'sweep2d_win_enzymes.csv'))
# 3) after interpolating the g.r. for each EFM over a 200x200 2D grid
# find the EFM with the maximal growth rate (best_efm)
f_interp_dict = SweepInterpolator.interpolate_2D_sweep()
kos = [(None, None),
('R60', 'ed'),
('R3', 'emp'),
(D.R_OXYGEN_DEPENDENT, 'oxphos')]
for ko, name in kos:
if name is None:
fname = os.path.join(D.TEMP_DIR, 'sweep2d_win_200x200.csv')
else:
fname = os.path.join(D.TEMP_DIR, 'sweep2d_%sko_win_200x200.csv' % name)
sweep2d_grid = f_interp_dict.calculate_growth_on_grid(ko)
sweep2d_grid.to_csv(fname)
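# Note: the plotting helpers below read the CSV files cached here (e.g.
# sweep2d_win_200x200.csv and sweep2d_win_enzymes.csv), so write_cache_files()
# is expected to have been run once before the plots are generated.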
def interpolate_single_condition(glucose=None, oxygen=None):
interpolator = SweepInterpolator.interpolate_2D_sweep()
glucose = glucose or D.STD_CONC['glucoseExt']
oxygen = oxygen or D.STD_CONC['oxygen']
data_df = interpolator.calc_all_gr(glucose, oxygen)
return data_df
def plot_growth_rate_hist(glucose=None, oxygen=None, ax=None):
glucose = glucose or D.STD_CONC['glucoseExt']
oxygen = oxygen or D.STD_CONC['oxygen']
data_df = interpolate_single_condition(glucose, oxygen)
if ax is not None:
bins = np.linspace(0, 0.8, 20)
sns.distplot(data_df, ax=ax, bins=bins,
color=D.BAR_COLOR, kde=False)
ax.set_title('[glu] = %g mM, [O$_2$] = %g mM' % (glucose, oxygen))
ax.set_xlabel(D.GROWTH_RATE_L)
ax.set_ylabel('no. of EFMs')
ax.set_xlim(0, None)
def allocation_pie_chart(ax, glucose=100.0, oxygen=3.7e-3):
win_enz_df = pd.read_csv(
os.path.join(D.TEMP_DIR, 'sweep2d_win_enzymes.csv'))
glu = sorted(win_enz_df[D.GLU_COL].unique(), key=lambda x: (x-glucose)**2)[0]
ox = sorted(win_enz_df[D.OX_COL].unique(), key=lambda x: (x-oxygen)**2)[0]
enz = win_enz_df[(win_enz_df[D.GLU_COL] == glu) & (win_enz_df[D.OX_COL] == ox)]
efm = enz['efm'].unique()[0]
gr = enz[D.GROWTH_RATE_L].unique()[0]
E_i = enz.set_index('reaction')['E_i'].sort_values(ascending=False)
E_i = E_i / E_i.sum()
E_lumped = E_i.drop(E_i[E_i.cumsum() > 0.95].index)
E_lumped.loc[D.REMAINDER_L] = E_i[E_i.cumsum() > 0.95].sum()
E_lumped.name = ''
E_lumped.plot.pie(colors=list(map(D.reaction_to_rgb, E_lumped.index)),
labels=list(map(D.GET_REACTION_NAME, E_lumped.index)),
ax=ax)
if efm in D.efm_dict:
efm_name = D.efm_dict[efm]['label']
else:
efm_name = '%d' % efm
ax.set_title('[glu] = %g mM, [O$_2$] = %g mM\nbest EFM is %s, %s = %.2f' %
(glucose, oxygen, efm_name, D.GROWTH_RATE_L, gr))
return efm
def plot_surface(ax, figdata,
z=D.GROWTH_RATE_L, c=D.GROWTH_RATE_L, cmap=None, vmax=None,
sweep_cache_fname='sweep2d_win_200x200.csv'):
"""
plot a 3D surface plot of the 2D-sweep axes, with growth rate (by default)
    as the z-axis. One can either use color to indicate height, or overlay the
mesh with another color based on a 4th parameter.
"""
monod_df, axis_params = get_monod_data(sweep_cache_fname)
#X = np.log10(monod_df[D.GLU_COL].as_matrix().reshape(200, 200).T)
#Y = np.log10(monod_df[D.OX_COL].as_matrix().reshape(200, 200).T)
monod_df = monod_df.join(figdata, on='best_efm', rsuffix='_')
X = np.arange(0, axis_params[D.GLU_COL]['N'])
Y = np.arange(0, axis_params[D.OX_COL]['N'])
X, Y = np.meshgrid(X, Y)
# create matrix-style DataFrames for the growth rate and oxygen uptake rate
z_mat = monod_df.pivot(index=D.GLU_COL, columns=D.OX_COL, values=z).T.as_matrix()
cmap = cmap or cm.magma_r
if z == c: # make a standard surface plot with gridlines and big strides
vmax = vmax or z_mat.max().max()
ax.plot_surface(X, Y, z_mat, rstride=1, cstride=1, cmap=cmap,
antialiased=False, rasterized=True,
linewidth=0, vmin=0, vmax=vmax, shade=False)
else: # use a different matrix for the color coding of the surface
c_mat = monod_df.pivot(index=D.GLU_COL, columns=D.OX_COL, values=c).T.as_matrix()
vmax = vmax or c_mat.max().max()
c_colors = np.empty((X.shape[1], X.shape[0], 4), dtype=float)
for ox in range(X.shape[1]):
for gl in range(X.shape[0]):
c_colors[ox, gl, :] = cmap(c_mat[ox, gl] / vmax)
ax.plot_surface(X, Y, z_mat, facecolors=c_colors,
antialiased=False, rasterized=True,
rstride=1, cstride=1, linewidth=0, shade=False)
sm = cm.ScalarMappable(cmap=cmap, norm=pyplot.Normalize(vmin=0, vmax=vmax))
sm._A = []
pyplot.colorbar(sm, ax=ax, fraction=0.07, shrink=0.5, label=c)
ax.plot_wireframe(X, Y, z_mat, rstride=6, cstride=6, linewidth=0.2,
edgecolor='k', alpha=0.3)
ax.set_xticks(axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(axis_params[D.OX_COL]['xticklabels'])
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(D.OX_COL)
ax.set_zlabel(z, rotation=90)
ax.view_init(20, -120)
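# Illustrative call (a sketch; 'standard' is the figure-data key used elsewhere
# in this script and D.YIELD_L is one of the per-EFM columns in it):
#   fig = pyplot.figure()
#   ax = fig.add_subplot(1, 1, 1, projection='3d')
#   plot_surface(ax, figure_data['standard'], z=D.GROWTH_RATE_L, c=D.YIELD_L)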
def plot_surface_diff(ax, ko_cache_fname, wt_cache_fname='sweep2d_win_200x200.csv'):
monod_df, axis_params = get_monod_data(wt_cache_fname)
wt_gr_mat = monod_df.pivot(index=D.GLU_COL, columns=D.OX_COL,
values=D.GROWTH_RATE_L).T.as_matrix()
monod_df, axis_params = get_monod_data(ko_cache_fname)
ko_gr_mat = monod_df.pivot(index=D.GLU_COL, columns=D.OX_COL,
values=D.GROWTH_RATE_L).T.as_matrix()
X = np.arange(0, axis_params[D.GLU_COL]['N'])
Y = np.arange(0, axis_params[D.OX_COL]['N'])
X, Y = np.meshgrid(X, Y)
ax.plot_surface(X, Y, ko_gr_mat,
rstride=6, cstride=6, cmap='Oranges',
linewidth=0.25, edgecolors='r',
vmin=0, vmax=0.7)
ax.plot_wireframe(X, Y, wt_gr_mat,
rstride=6, cstride=6, linewidth=0.5,
colors=(0.1, 0.1, 0.6), alpha=1)
ax.set_xticks(axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(axis_params[D.OX_COL]['xticklabels'])
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(D.OX_COL)
ax.set_zlabel(D.GROWTH_RATE_L, rotation=90)
ax.view_init(20, -120)
def plot_heatmap_diff(ax, cache_fname1, cache_fname2, vmax=1):
monod1_df, wt_axis_params = get_monod_data(cache_fname1)
gr1_mat = monod1_df.pivot(index=D.GLU_COL, columns=D.OX_COL, values=D.GROWTH_RATE_L).T
monod2_df, ko_axis_params = get_monod_data(cache_fname2)
gr2_mat = monod2_df.pivot(index=D.GLU_COL, columns=D.OX_COL, values=D.GROWTH_RATE_L).T
pcol = ax.imshow(np.log2(gr1_mat) - np.log2(gr2_mat),
interpolation='none', cmap='bwr', vmin=-vmax, vmax=vmax,
origin='lower', aspect=1)
pyplot.colorbar(pcol, ax=ax, label=r'log$_2$ fold change',
fraction=0.1)
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(D.OX_COL)
ax.set_xticks(wt_axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(wt_axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(wt_axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(wt_axis_params[D.OX_COL]['xticklabels'])
def plot_heatmap(ax, wt_cache_fname='sweep2d_win_200x200.csv', vmax=None):
wt_monod_df, wt_axis_params = get_monod_data(wt_cache_fname)
wt_gr_mat = wt_monod_df.pivot(index=D.GLU_COL,
columns=D.OX_COL, values=D.GROWTH_RATE_L).T
pcol = ax.imshow(wt_gr_mat,
interpolation='none', cmap='magma_r', vmin=0, vmax=vmax,
origin='lower', aspect=1)
pyplot.colorbar(pcol, ax=ax, label=r'growth rate [h$^-1$]',
fraction=0.1)
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(D.OX_COL)
ax.set_xticks(wt_axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(wt_axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(wt_axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(wt_axis_params[D.OX_COL]['xticklabels'])
def get_monod_data(sweep_cache_fname='sweep2d_win_200x200.csv'):
monod_df = pd.read_csv(os.path.join(D.TEMP_DIR, sweep_cache_fname))
# construct the bitmap by assigning the color of the winning EFM to each
# pixel
monod_df['hexcolor'] = monod_df['best_efm'].apply(D.efm_to_hex)
standard_conc = {D.GLU_COL: 100.0, D.OX_COL: 3.7}
ticks = {D.GLU_COL: [0, 44, 88, 133, 177],
D.OX_COL: [0, 50, 100, 150, 199]}
axis_params = {}
for col in [D.GLU_COL, D.OX_COL]:
axis_params[col] = {}
levels = sorted(monod_df[col].unique())
axis_params[col]['N'] = len(levels)
axis_params[col]['min'] = monod_df[col].min()
axis_params[col]['max'] = monod_df[col].max()
x_std = np.log(standard_conc[col])
x_min = np.log(monod_df[col].min())
x_max = np.log(monod_df[col].max())
axis_params[col]['std_ind'] = len(levels) * (x_std - x_min) / (x_max - x_min)
axis_params[col]['xticks'] = ticks[col]
tickvalues = [levels[i] for i in ticks[col]]
axis_params[col]['xticklabels'] = map(D.as_base10_exp, tickvalues)
return monod_df, axis_params
def plot_monod_surface(figure_data, sweep_cache_fname='sweep2d_win_200x200.csv'):
monod_df, axis_params = get_monod_data(sweep_cache_fname)
max_growth_rate = monod_df[D.GROWTH_RATE_L].max()
figS12, axS12 = pyplot.subplots(3, 3, figsize=(12, 12))
cbar_ax = figS12.add_axes([.72, .75, .02, .2])
# create a bitmap to be used with imshow
hexcolor_df = monod_df.pivot(index=D.GLU_COL,
columns=D.OX_COL,
values='hexcolor')
best_efm_color = np.zeros((axis_params[D.OX_COL]['N'],
axis_params[D.GLU_COL]['N'], 3))
for i, g in enumerate(hexcolor_df.index):
for j, o in enumerate(hexcolor_df.columns):
best_efm_color[j, i, :] = colors.hex2color(hexcolor_df.at[g, o])
axS12[0, 0].imshow(best_efm_color, interpolation='none', origin='lower')
axS12[0, 0].set_xlabel(D.GLU_COL)
axS12[0, 0].set_ylabel(D.OX_COL)
axS12[0, 0].set_xticks(axis_params[D.GLU_COL]['xticks'])
axS12[0, 0].set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
axS12[0, 0].set_yticks(axis_params[D.OX_COL]['xticks'])
axS12[0, 0].set_yticklabels(axis_params[D.OX_COL]['xticklabels'])
# convert the standard glucose concentration to the imshow coordinates
# we know that the minimum level is mapped to 0
# and the maximum level is mapped to N
# and that it is a logarithmic scale
axS12[0, 0].plot([axis_params[D.GLU_COL]['std_ind'], axis_params[D.GLU_COL]['std_ind']],
[0, axis_params[D.OX_COL]['N']-1],
'--', color='grey', linewidth=1)
axS12[0, 0].plot([0, axis_params[D.GLU_COL]['N']-1],
[axis_params[D.OX_COL]['std_ind'], axis_params[D.OX_COL]['std_ind']],
'--', color='grey', linewidth=1 )
# mark the 3 selected EFMs in the Monod surface plot
axS12[0, 0].annotate('max-gr', xy=(0.5, 0.8),
xycoords='axes fraction', ha='left', va='top',
size=14, color='k')
axS12[0, 0].annotate('pareto', xy=(0.1, 0.4),
xycoords='axes fraction', ha='left', va='top',
size=14, color='k')
axS12[0, 0].annotate('ana-lac', xy=(0.73, 0.1),
xycoords='axes fraction', ha='left', va='top',
size=14, color='k')
axS12[0, 0].annotate('aero-ace', xy=(0.82, 0.29),
xycoords='axes fraction', ha='left', va='top',
size=14, color='k')
best_efm_gr_df = monod_df.pivot(index=D.GLU_COL,
columns=D.OX_COL,
values=D.GROWTH_RATE_L)
axS12[0, 1].set_xlabel(best_efm_gr_df.index.name)
axS12[0, 1].set_xticks(axis_params[D.GLU_COL]['xticks'])
axS12[0, 1].set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
axS12[0, 1].get_yaxis().set_visible(False)
pcol = axS12[0, 1].imshow(best_efm_gr_df.T, interpolation='spline16',
cmap='Oranges', vmin=0,
vmax=max_growth_rate, origin='lower')
norm = colors.Normalize(vmin=0, vmax=max_growth_rate)
colorbar.ColorbarBase(cbar_ax, cmap='Oranges', norm=norm)
cbar_ax.set_title(D.GROWTH_RATE_L, loc='center')
for i, efm in enumerate(sorted(monod_df['best_efm'].unique())):
if efm in D.efm_dict:
label = 'EFM %04d (%s)' % (efm, D.efm_dict[efm]['label'])
else:
label = 'EFM %04d' % efm
axS12[0, 2].plot([0, 1], [i, i],
label=label, color=D.efm_to_hex(efm), linewidth=3)
axS12[0, 2].set_xlim(-1, 0)
axS12[0, 2].set_ylim(-1, 0)
axS12[0, 2].get_xaxis().set_visible(False)
axS12[0, 2].get_yaxis().set_visible(False)
axS12[0, 2].legend(fontsize=10, labelspacing=0.1, loc='center right')
axS12[0, 2].axis('off')
# make a Monod surface plot where certain features of the winning EFMs
# are presented in color coding
plot_parameters = [
{'c': D.YIELD_L, 'cmap': 'magma_r', 'vmin': 0, 'vmax': 30 , 'ax': axS12[1, 0]},
{'c': D.OXYGEN_L, 'cmap': 'magma_r', 'vmin': 0, 'vmax': 0.7, 'ax': axS12[1, 1]},
{'c': D.ACE_L, 'cmap': 'magma_r', 'vmin': 0, 'vmax': 1.5, 'ax': axS12[1, 2]},
{'c': D.LACTATE_L, 'cmap': 'magma_r', 'vmin': 0, 'vmax': 1.5, 'ax': axS12[2, 0]},
{'c': D.ED_L, 'cmap': 'magma_r', 'vmin': 0, 'vmax': 2 , 'ax': axS12[2, 1]},
{'c': D.PPP_L, 'cmap': 'magma_r', 'vmin': 0, 'vmax': 4.5, 'ax': axS12[2, 2]},
]
pareto_data_df = figure_data['standard']
for i, d in enumerate(plot_parameters):
ax = d['ax']
ax.set_title(d['c'])
df = monod_df.join(pareto_data_df[d['c']], on='best_efm')
df = df.pivot(index=D.GLU_COL,
columns=D.OX_COL,
values=d['c'])
ax.set_xlabel(df.index.name)
ax.set_ylabel(df.columns.name)
pcol = ax.imshow(df.T, interpolation='none', cmap=d['cmap'],
origin='lower', vmin=d['vmin'], vmax=d['vmax'])
pyplot.colorbar(pcol, ax=ax, shrink=0.6)
# since the plot is made in a linear scale, we need to "manually" convert
# the ticks to the log-scale using the index and columns of 'df'
ax.set_xticks(axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(axis_params[D.OX_COL]['xticklabels'])
axS12[0, 0].annotate('a', xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20, color='white')
axS12[0, 1].annotate('b', xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20, color='black')
for i, ax in enumerate(axS12.flat):
if i == 2:
continue
if i in [0, 3, 4]:
color = 'w'
else:
color = 'k'
letter = chr(ord('a') + (i if i < 2 else i-1))
ax.annotate(letter, xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20, color=color)
axS12[1, 1].get_yaxis().set_visible(False)
axS12[1, 2].get_yaxis().set_visible(False)
axS12[2, 1].get_yaxis().set_visible(False)
axS12[2, 2].get_yaxis().set_visible(False)
return figS12
def plot_conc_versus_uptake_figure(figure_data,
sweep_cache_fname='sweep2d_win_200x200.csv'):
"""
in order to compare to FBA predictions
join the Monod surface data with the EFM rates table, in order to
get specific rates for each winning EFM
"""
monod_df, axis_params = get_monod_data(sweep_cache_fname)
best_efm_hex = monod_df.pivot(index=D.GLU_COL,
columns=D.OX_COL,
values='hexcolor')
best_efm_color = np.zeros((best_efm_hex.shape[1], best_efm_hex.shape[0], 3))
for i, g in enumerate(best_efm_hex.index):
for j, o in enumerate(best_efm_hex.columns):
hexcolor = best_efm_hex.at[g, o]
best_efm_color[j, i, :] = colors.hex2color(hexcolor)
fig = pyplot.figure(figsize=(8, 8))
ax_list = []
##################### Monod surface plot of winning EFMs ##################
ax = fig.add_subplot(2, 2, 1)
ax_list.append(ax)
ax.imshow(best_efm_color, interpolation='none', origin='lower')
ax.set_xticks(axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(axis_params[D.OX_COL]['xticklabels'])
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(D.OX_COL)
################### growth rate surface plot vs concentrations ############
ax = fig.add_subplot(2, 2, 2, projection='3d', facecolor='white')
ax_list.append(ax)
X = np.arange(0, axis_params[D.GLU_COL]['N'])
Y = np.arange(0, axis_params[D.OX_COL]['N'])
X, Y = np.meshgrid(X, Y)
z_mat = monod_df.pivot(index=D.GLU_COL, columns=D.OX_COL, values=D.GROWTH_RATE_L).T.as_matrix()
ax.plot_surface(X, Y, z_mat, facecolors=best_efm_color,
rstride=1, cstride=1,
antialiased=False, rasterized=True,
linewidth=0, shade=False)
ax.plot_wireframe(X, Y, z_mat, rstride=6, cstride=6, linewidth=0.2,
edgecolor='k', alpha=0.3)
ax.view_init(20, -120)
ax.set_xticks(axis_params[D.GLU_COL]['xticks'])
ax.set_xticklabels(axis_params[D.GLU_COL]['xticklabels'])
ax.set_yticks(axis_params[D.OX_COL]['xticks'])
ax.set_yticklabels(axis_params[D.OX_COL]['xticklabels'])
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(D.OX_COL)
ax.set_zlabel(D.GROWTH_RATE_L, rotation=90)
###########################################################################
OX_UPTAKE_L = 'O$_2$ uptake rate (a.u.)'
GLU_UPRATE_L = 'glucose uptake rate (a.u.)'
rates_df, _, _ = SweepInterpolator.get_general_params(min(ITER_RANGE))
monod_df = monod_df.join(rates_df, on='best_efm')
monod_df[OX_UPTAKE_L] = monod_df[D.R_OXYGEN_DEPENDENT].sum(1) * monod_df[D.GROWTH_RATE_L]
monod_df[OX_UPTAKE_L] = monod_df[OX_UPTAKE_L].round(0)
monod_df[GLU_UPRATE_L] = monod_df[D.R_GLUCOSE_IN] * monod_df[D.GROWTH_RATE_L]
monod_df[GLU_UPRATE_L] = monod_df[GLU_UPRATE_L].round(0)
monod_df[D.GROWTH_RATE_L] = monod_df[D.GROWTH_RATE_L].round(3)
small_monod_df = monod_df[[GLU_UPRATE_L, OX_UPTAKE_L, D.GROWTH_RATE_L, 'hexcolor']].drop_duplicates()
small_monod_df.sort_values(D.GROWTH_RATE_L, inplace=True)
########## 2D scatter plot of uptake rates (winning EFM as color) #########
ax = fig.add_subplot(2, 2, 3)
ax_list.append(ax)
ax.scatter(x=small_monod_df[GLU_UPRATE_L],
y=small_monod_df[OX_UPTAKE_L],
s=15, c=small_monod_df['hexcolor'],
linewidth=0)
ax.set_xlabel(GLU_UPRATE_L)
ax.set_ylabel(OX_UPTAKE_L)
############## 3D scatter plot of growth rate vs uptake rates #############
ax = fig.add_subplot(2, 2, 4, projection='3d')
ax_list.append(ax)
ax.scatter(xs=small_monod_df[GLU_UPRATE_L],
ys=small_monod_df[OX_UPTAKE_L],
zs=small_monod_df[D.GROWTH_RATE_L],
s=15, c=small_monod_df['hexcolor'],
cmap='Oranges', vmax=0.7, linewidth=0,
alpha=1)
ax.set_xlabel(GLU_UPRATE_L, labelpad=10)
ax.set_ylabel(OX_UPTAKE_L, labelpad=10)
ax.set_zlabel(D.GROWTH_RATE_L, labelpad=10)
ax.view_init(20, -120)
for i, ax in enumerate(ax_list):
ax.annotate(chr(ord('a')+i), xy=(0.98, 0.98), xycoords='axes fraction',
fontsize=20, ha='right', va='top')
return fig
def plot_oxygen_sweep(ax, glucose_conc=None, N=200,
legend_loc='lower right', legend_fontsize=10):
"""make line plots of gr vs one of the axes (oxygen or glucose)"""
if glucose_conc is None:
glucose_conc = D.STD_CONC['glucoseExt']
ox_grid = np.logspace(np.log10(D.MIN_CONC['oxygen']),
np.log10(D.MAX_CONC['oxygen']),
N)
interp_data_df = pd.DataFrame(index=ox_grid, columns=D.efm_dict.keys())
interpolator = SweepInterpolator.interpolate_2D_sweep(D.efm_dict.keys())
for efm in interp_data_df.columns:
interp_data_df[efm] = [interpolator.calc_gr(efm, glucose_conc, o)
for o in ox_grid]
colors, labels = zip(*D.efm_dict.values())
interp_data_df.plot(kind='line', ax=ax, linewidth=2, color=colors)
ax.legend(labels,
loc=legend_loc, fontsize=legend_fontsize, labelspacing=0.2)
ax.set_xscale('log')
ax.set_xlabel(D.OX_COL)
ax.set_ylabel(r'growth rate [h$^{-1}$]')
ax.set_ylim([0, None])
# mark the line where 'standard' oxygen levels are
std_ox = D.STD_CONC['oxygen']
ax.plot([std_ox, std_ox], ax.get_ylim(), '--', color='grey', linewidth=1)
ax.text(std_ox, ax.get_ylim()[1], ' std. $O_2$', ha='center', va='bottom',
color='grey', fontsize=14)
ax.text(0.02, 0.6, 'glucose (%d mM)' % glucose_conc, ha='left', va='center',
rotation=90, fontsize=14, color='grey', transform=ax.transAxes)
def plot_glucose_sweep(ax, oxygen_conc=None, N=200, ylim=None,
legend_loc='upper left', legend_fontsize=10,
mark_glucose=True):
"""make line plots of gr vs one of the axes (oxygen or glucose)"""
if oxygen_conc is None:
oxygen_conc = D.STD_CONC['oxygen']
glu_grid = np.logspace(np.log10(D.MIN_CONC['glucoseExt']),
np.log10(D.MAX_CONC['glucoseExt']),
N)
interp_data_df = pd.DataFrame(index=glu_grid, columns=D.efm_dict.keys())
interpolator = SweepInterpolator.interpolate_2D_sweep(D.efm_dict.keys())
for efm in interp_data_df.columns:
interp_data_df[efm] = [interpolator.calc_gr(efm, g, oxygen_conc)
for g in glu_grid]
colors, labels = zip(*D.efm_dict.values())
interp_data_df.plot(kind='line', ax=ax, linewidth=2, color=colors)
if legend_loc is not None:
ax.legend(labels,
loc=legend_loc, fontsize=legend_fontsize, labelspacing=0.2)
else:
ax.legend().remove()
ax.set_xscale('log')
ax.set_xlabel(D.GLU_COL)
ax.set_ylabel(r'growth rate [h$^{-1}$]')
if ylim is None:
ax.set_ylim([0, None])
else:
ax.set_ylim(ylim)
if mark_glucose:
# mark the line where 'standard' oxygen levels are
std_ox = D.STD_CONC['glucoseExt']
ax.plot([std_ox, std_ox], ax.get_ylim(), '--', color='grey', linewidth=1)
ax.text(std_ox, ax.get_ylim()[1], ' std. glucose', ha='center', va='bottom',
color='grey', fontsize=14)
ax.text(0.02, 0.6, '$O_2$ (%g mM)' % oxygen_conc, ha='left', va='center',
rotation=90, fontsize=14, color='grey', transform=ax.transAxes)
def get_glucose_sweep_df(oxygen_conc=None, efm_list=None, N=200):
if oxygen_conc is None:
oxygen_conc = D.STD_CONC['oxygen']
glu_grid = np.logspace(np.log10(D.MIN_CONC['glucoseExt']),
np.log10(D.MAX_CONC['glucoseExt']),
N)
interpolator = SweepInterpolator.interpolate_2D_sweep(efm_list)
interp_data_df = pd.DataFrame(index=glu_grid,
columns=interpolator.efm_list)
for efm in interpolator.efm_list:
interp_data_df[efm] = [interpolator.calc_gr(efm, g, oxygen_conc)
for g in glu_grid]
return interp_data_df
def get_anaerobic_glucose_sweep_df(figure_data, N=200):
anaerobic_sweep_data_df = figure_data['monod_glucose_anae'].drop(9999)
# filter all EMFs that have a > 1% drop in the function (it should be
# completely monotonic, but some numerical errors should be okay).
non_monotinic = (np.log(anaerobic_sweep_data_df).diff(axis=1) < 0)
anaerobic_sweep_data_df[non_monotinic] = np.nan
glu_grid = np.logspace(np.log10(D.MIN_CONC['glucoseExt']),
np.log10(D.MAX_CONC['glucoseExt']),
N)
interp_df = anaerobic_sweep_data_df.transpose()
interp_df = interp_df.append(
pd.DataFrame(index=glu_grid, columns=anaerobic_sweep_data_df.index))
interp_df = interp_df[~interp_df.index.duplicated(keep='first')]
interp_df.sort_index(inplace=True)
interp_df.index = np.log(interp_df.index)
interpolated_df = interp_df.interpolate(method='polynomial', order=3)
interpolated_df.index = np.exp(interpolated_df.index)
return interpolated_df
def plot_oxygen_dual_pareto(data_df, ax, s=9,
std_ox=None, low_ox=None, std_glu=None,
draw_lines=True):
std_ox = std_ox or D.STD_CONC['oxygen']
low_ox = low_ox or D.LOW_CONC['oxygen']
std_glu = std_glu or D.STD_CONC['glucoseExt']
std_ox_df = pd.DataFrame(index=data_df.index,
columns=[D.GROWTH_RATE_L, D.YIELD_L])
std_ox_df[D.YIELD_L] = data_df[D.YIELD_L]
low_ox_df = pd.DataFrame(index=data_df.index,
columns=[D.GROWTH_RATE_L, D.YIELD_L])
low_ox_df[D.YIELD_L] = data_df[D.YIELD_L]
# calculate the growth rates in the lower oxygen level, using the
# interpolated functions
interpolator = SweepInterpolator.interpolate_2D_sweep()
for efm in data_df.index:
std_ox_df.at[efm, D.GROWTH_RATE_L] = \
interpolator.calc_gr(efm, std_glu, std_ox)
low_ox_df.at[efm, D.GROWTH_RATE_L] = \
interpolator.calc_gr(efm, std_glu, low_ox)
D.plot_dual_pareto(std_ox_df, 'std. O$_2$ (0.21 mM)',
low_ox_df, 'low O$_2$ (%g mM)' % low_ox,
s=s, ax=ax, x=D.YIELD_L, y=D.GROWTH_RATE_L,
draw_lines=draw_lines)
ax.set_xlim(-1e-3, None)
ax.set_ylim(-1e-3, None)
def plot_glucose_dual_pareto(data_df, ax,
std_glu=None, low_glu=None, std_ox=None,
draw_lines=True):
std_glu = std_glu or D.STD_CONC['glucoseExt']
low_glu = low_glu or D.LOW_CONC['glucoseExt']
std_ox = std_ox or D.STD_CONC['oxygen']
std_glu_df = pd.DataFrame(index=data_df.index,
columns=[D.GROWTH_RATE_L, D.YIELD_L])
std_glu_df[D.YIELD_L] = data_df[D.YIELD_L]
low_glu_df = pd.DataFrame(index=data_df.index,
columns=[D.GROWTH_RATE_L, D.YIELD_L])
low_glu_df[D.YIELD_L] = data_df[D.YIELD_L]
# calculate the growth rates in the lower oxygen level, using the
# interpolated functions
interpolator = SweepInterpolator.interpolate_2D_sweep()
for efm in data_df.index:
std_glu_df.at[efm, D.GROWTH_RATE_L] = \
interpolator.calc_gr(efm, std_glu, std_ox)
low_glu_df.at[efm, D.GROWTH_RATE_L] = \
interpolator.calc_gr(efm, low_glu, std_ox)
D.plot_dual_pareto(std_glu_df, 'std. glucose (100 mM)',
low_glu_df, 'low glucose (%g mM)' % low_glu,
s=9, ax=ax, x=D.YIELD_L, y=D.GROWTH_RATE_L,
draw_lines=draw_lines)
ax.set_xlim(-1e-3, None)
ax.set_ylim(-1e-3, None)
if __name__ == '__main__':
figure_data = D.get_figure_data()
rcParams['font.size'] = 12.0
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = 'Arial'
rcParams['legend.fontsize'] = 'small'
rcParams['axes.labelsize'] = 12.0
rcParams['axes.titlesize'] = 12.0
rcParams['xtick.labelsize'] = 10.0
rcParams['ytick.labelsize'] = 10.0
# run this script in order to calculate the extrapolated growth rates for
# all the 200x200 grid and cache the results in a temp file for quick
# access for the scripts that plot the data
fig = pyplot.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 2, 1)
plot_oxygen_sweep(ax)
ax = fig.add_subplot(1, 2, 2)
plot_oxygen_dual_pareto(figure_data['standard'], ax) | gpl-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/utils/tests/test_metaestimators.py | 86 | 2304 | from sklearn.utils.testing import assert_true, assert_false
from sklearn.utils.metaestimators import if_delegate_has_method
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
class MetaEst(object):
"""A mock meta estimator"""
def __init__(self, sub_est, better_sub_est=None):
self.sub_est = sub_est
self.better_sub_est = better_sub_est
@if_delegate_has_method(delegate='sub_est')
def predict(self):
pass
class MetaEstTestTuple(MetaEst):
"""A mock meta estimator to test passing a tuple of delegates"""
@if_delegate_has_method(delegate=('sub_est', 'better_sub_est'))
def predict(self):
pass
class MetaEstTestList(MetaEst):
"""A mock meta estimator to test passing a list of delegates"""
@if_delegate_has_method(delegate=['sub_est', 'better_sub_est'])
def predict(self):
pass
class HasPredict(object):
"""A mock sub-estimator with predict method"""
def predict(self):
pass
class HasNoPredict(object):
"""A mock sub-estimator with no predict method"""
pass
def test_if_delegate_has_method():
assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestList(HasPredict(), HasPredict()), 'predict'))
| mit |
AnishShah/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 12795 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if there is not.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - list of [word(string), count(int)] pairs for the most common words, with ['UNK', count] first
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
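# Illustrative example (hypothetical values, not taken from the actual corpus):
# if the text began "the quick brown fox", dictionary might map 'the' -> 1 and
# 'quick' -> 4312, data would then start [1, 4312, ...], and
# reverse_dictionary[1] would map back to 'the'.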
data, count, dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
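  # For example, with skip_window=1 the span is 3, and the deque below always
  # holds [left context, target, right context] as it slides over the data.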
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy; they don't affect the calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
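  # In short: rather than a full softmax over all 50,000 vocabulary words, NCE
  # trains the model to distinguish the true context word from num_sampled
  # randomly drawn noise words, so each step only touches num_sampled + 1 rows
  # of the output weights.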
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
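  # Since the embeddings are L2-normalized above, this matmul of unit vectors
  # is exactly the cosine similarity between each validation word and every
  # word in the vocabulary.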
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
vinodkc/spark | python/pyspark/pandas/tests/data_type_ops/test_num_ops.py | 3 | 18636 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
from distutils.version import LooseVersion
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class NumOpsTest(PandasOnSparkTestCase, TestCasesUtils):
"""Unit tests for arithmetic operations of numeric data types.
A few test cases are disabled because pandas-on-Spark returns float64 whereas pandas
returns float32.
The underlying reason is the respective Spark operations return DoubleType always.
"""
@property
def float_pser(self):
return pd.Series([1, 2, 3], dtype=float)
@property
def float_psser(self):
return ps.from_pandas(self.float_pser)
def test_add(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser + pser, psser + psser)
self.assert_eq(pser + 1, psser + 1)
# self.assert_eq(pser + 0.1, psser + 0.1)
self.assert_eq(pser + pser.astype(bool), psser + psser.astype(bool))
self.assert_eq(pser + True, psser + True)
self.assert_eq(pser + False, psser + False)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
self.assertRaises(TypeError, lambda: psser + self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser + self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser + self.non_numeric_pssers["date"])
self.assertRaises(TypeError, lambda: psser + self.non_numeric_pssers["categorical"])
self.assert_eq(
(psser + self.non_numeric_pssers["bool"]).sort_index(),
pser + self.non_numeric_psers["bool"],
)
def test_sub(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser - pser, psser - psser)
self.assert_eq(pser - 1, psser - 1)
# self.assert_eq(pser - 0.1, psser - 0.1)
self.assert_eq(pser - pser.astype(bool), psser - psser.astype(bool))
self.assert_eq(pser - True, psser - True)
self.assert_eq(pser - False, psser - False)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
self.assertRaises(TypeError, lambda: psser - self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser - self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser - self.non_numeric_pssers["date"])
self.assertRaises(TypeError, lambda: psser - self.non_numeric_pssers["categorical"])
self.assert_eq(
(psser - self.non_numeric_pssers["bool"]).sort_index(),
pser - self.non_numeric_psers["bool"],
)
def test_mul(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser * pser, psser * psser)
self.assert_eq(pser * pser.astype(bool), psser * psser.astype(bool))
self.assert_eq(pser * True, psser * True)
self.assert_eq(pser * False, psser * False)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
if psser.dtype in [int, np.int32]:
self.assert_eq(
(psser * self.non_numeric_pssers["string"]).sort_index(),
pser * self.non_numeric_psers["string"],
)
else:
self.assertRaises(TypeError, lambda: psser * self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser * self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser * self.non_numeric_pssers["date"])
self.assertRaises(TypeError, lambda: psser * self.non_numeric_pssers["categorical"])
self.assert_eq(
(psser * self.non_numeric_pssers["bool"]).sort_index(),
pser * self.non_numeric_psers["bool"],
)
def test_truediv(self):
for pser, psser in self.numeric_pser_psser_pairs:
if psser.dtype in [float, int, np.int32]:
self.assert_eq(pser / pser, psser / psser)
self.assert_eq(pser / pser.astype(bool), psser / psser.astype(bool))
self.assert_eq(pser / True, psser / True)
self.assert_eq(pser / False, psser / False)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
self.assertRaises(TypeError, lambda: psser / self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser / self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser / self.non_numeric_pssers["date"])
self.assertRaises(TypeError, lambda: psser / self.non_numeric_pssers["categorical"])
self.assert_eq(
(self.float_psser / self.non_numeric_pssers["bool"]).sort_index(),
self.float_pser / self.non_numeric_psers["bool"],
)
def test_floordiv(self):
for pser, psser in self.numeric_pser_psser_pairs:
if psser.dtype == float:
self.assert_eq(pser // pser, psser // psser)
self.assert_eq(pser // pser.astype(bool), psser // psser.astype(bool))
self.assert_eq(pser // True, psser // True)
self.assert_eq(pser // False, psser // False)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
self.assertRaises(TypeError, lambda: psser // self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser // self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser // self.non_numeric_pssers["date"])
self.assertRaises(
TypeError, lambda: psser // self.non_numeric_pssers["categorical"]
)
if LooseVersion(pd.__version__) >= LooseVersion("0.25.3"):
self.assert_eq(
(self.float_psser // self.non_numeric_pssers["bool"]).sort_index(),
self.float_pser // self.non_numeric_psers["bool"],
)
else:
self.assert_eq(
(self.float_pser // self.non_numeric_psers["bool"]).sort_index(),
ps.Series([1.0, 2.0, np.inf]),
)
def test_mod(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser % pser, psser % psser)
self.assert_eq(pser % pser.astype(bool), psser % psser.astype(bool))
self.assert_eq(pser % True, psser % True)
if psser.dtype in [int, np.int32]:
self.assert_eq(ps.Series([np.nan, np.nan, np.nan], dtype=float), psser % False)
else:
self.assert_eq(
ps.Series([np.nan, np.nan, np.nan], dtype=psser.dtype), psser % False
)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
self.assertRaises(TypeError, lambda: psser % self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser % self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser % self.non_numeric_pssers["date"])
self.assertRaises(TypeError, lambda: psser % self.non_numeric_pssers["categorical"])
self.assert_eq(
(self.float_psser % self.non_numeric_pssers["bool"]).sort_index(),
self.float_pser % self.non_numeric_psers["bool"],
)
def test_pow(self):
for pser, psser in self.numeric_pser_psser_pairs:
if psser.dtype == float:
self.assert_eq(pser ** pser, psser ** psser)
self.assert_eq(pser ** pser.astype(bool), psser ** psser.astype(bool))
self.assert_eq(pser ** True, psser ** True)
self.assert_eq(pser ** False, psser ** False)
with option_context("compute.ops_on_diff_frames", True):
for pser, psser in self.numeric_pser_psser_pairs:
self.assertRaises(TypeError, lambda: psser ** self.non_numeric_pssers["string"])
self.assertRaises(TypeError, lambda: psser ** self.non_numeric_pssers["datetime"])
self.assertRaises(TypeError, lambda: psser ** self.non_numeric_pssers["date"])
self.assertRaises(
TypeError, lambda: psser ** self.non_numeric_pssers["categorical"]
)
self.assert_eq(
(self.float_psser ** self.non_numeric_pssers["bool"]).sort_index(),
self.float_pser ** self.non_numeric_psers["bool"],
)
def test_radd(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(1 + pser, 1 + psser)
# self.assert_eq(0.1 + pser, 0.1 + psser)
self.assertRaises(TypeError, lambda: "x" + psser)
self.assert_eq(True + pser, True + psser)
self.assert_eq(False + pser, False + psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) + psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) + psser)
def test_rsub(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(1 - pser, 1 - psser)
# self.assert_eq(0.1 - pser, 0.1 - psser)
self.assertRaises(TypeError, lambda: "x" - psser)
self.assert_eq(True - pser, True - psser)
self.assert_eq(False - pser, False - psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) - psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) - psser)
def test_rmul(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(1 * pser, 1 * psser)
# self.assert_eq(0.1 * pser, 0.1 * psser)
self.assertRaises(TypeError, lambda: "x" * psser)
self.assert_eq(True * pser, True * psser)
self.assert_eq(False * pser, False * psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) * psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) * psser)
def test_rtruediv(self):
for pser, psser in self.numeric_pser_psser_pairs:
# self.assert_eq(5 / pser, 5 / psser)
# self.assert_eq(0.1 / pser, 0.1 / psser)
self.assertRaises(TypeError, lambda: "x" / psser)
self.assert_eq((True / pser).astype(float), True / psser, check_exact=False)
self.assert_eq((False / pser).astype(float), False / psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) / psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) / psser)
def test_rfloordiv(self):
for pser, psser in self.numeric_pser_psser_pairs:
# self.assert_eq(5 // pser, 5 // psser)
# self.assert_eq(0.1 // pser, 0.1 // psser)
self.assertRaises(TypeError, lambda: "x" // psser)
self.assert_eq((True // pser).astype(float), True // psser)
self.assert_eq((False // pser).astype(float), False // psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) // psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) // psser)
def test_rpow(self):
for pser, psser in self.numeric_pser_psser_pairs:
# self.assert_eq(1 ** pser, 1 ** psser)
# self.assert_eq(0.1 ** pser, 0.1 ** psser)
self.assertRaises(TypeError, lambda: "x" ** psser)
self.assert_eq((True ** pser).astype(float), True ** psser)
self.assert_eq((False ** pser).astype(float), False ** psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) ** psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) ** psser)
def test_rmod(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(1 % pser, 1 % psser)
# self.assert_eq(0.1 % pser, 0.1 % psser)
self.assert_eq(True % pser, True % psser)
self.assert_eq(False % pser, False % psser)
self.assertRaises(TypeError, lambda: datetime.date(1994, 1, 1) % psser)
self.assertRaises(TypeError, lambda: datetime.datetime(1994, 1, 1) % psser)
def test_and(self):
psser = self.numeric_pssers[0]
self.assertRaises(TypeError, lambda: psser & True)
self.assertRaises(TypeError, lambda: psser & False)
self.assertRaises(TypeError, lambda: psser & psser)
def test_rand(self):
psser = self.numeric_pssers[0]
self.assertRaises(TypeError, lambda: True & psser)
self.assertRaises(TypeError, lambda: False & psser)
def test_or(self):
psser = self.numeric_pssers[0]
self.assertRaises(TypeError, lambda: psser | True)
self.assertRaises(TypeError, lambda: psser | False)
self.assertRaises(TypeError, lambda: psser | psser)
def test_ror(self):
psser = self.numeric_pssers[0]
self.assertRaises(TypeError, lambda: True | psser)
self.assertRaises(TypeError, lambda: False | psser)
def test_from_to_pandas(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser.isnull(), psser.isnull())
def test_astype(self):
for pser, psser in self.numeric_pser_psser_pairs:
self.assert_eq(pser.astype(int), psser.astype(int))
self.assert_eq(pser.astype(float), psser.astype(float))
self.assert_eq(pser.astype(np.float32), psser.astype(np.float32))
self.assert_eq(pser.astype(np.int32), psser.astype(np.int32))
self.assert_eq(pser.astype(np.int16), psser.astype(np.int16))
self.assert_eq(pser.astype(np.int8), psser.astype(np.int8))
self.assert_eq(pser.astype(str), psser.astype(str))
self.assert_eq(pser.astype(bool), psser.astype(bool))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=[2, 1, 3])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
class IntegralExtensionOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def intergral_extension_psers(self):
dtypes = ["Int8", "Int16", "Int32", "Int64"]
return [pd.Series([1, 2, 3, None], dtype=dtype) for dtype in dtypes]
@property
def intergral_extension_pssers(self):
return [ps.from_pandas(pser) for pser in self.intergral_extension_psers]
@property
def intergral_extension_pser_psser_pairs(self):
return zip(self.intergral_extension_psers, self.intergral_extension_pssers)
def test_from_to_pandas(self):
for pser, psser in self.intergral_extension_pser_psser_pairs:
self.check_extension(pser, psser.to_pandas())
self.check_extension(ps.from_pandas(pser), psser)
def test_isnull(self):
for pser, psser in self.intergral_extension_pser_psser_pairs:
self.assert_eq(pser.isnull(), psser.isnull())
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
class FractionalExtensionOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def fractional_extension_psers(self):
dtypes = ["Float32", "Float64"]
return [pd.Series([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in dtypes]
@property
def fractional_extension_pssers(self):
return [ps.from_pandas(pser) for pser in self.fractional_extension_psers]
@property
def fractional_extension_pser_psser_pairs(self):
return zip(self.fractional_extension_psers, self.fractional_extension_pssers)
def test_from_to_pandas(self):
for pser, psser in self.fractional_extension_pser_psser_pairs:
self.check_extension(pser, psser.to_pandas())
self.check_extension(ps.from_pandas(pser), psser)
def test_isnull(self):
for pser, psser in self.fractional_extension_pser_psser_pairs:
self.assert_eq(pser.isnull(), psser.isnull())
if __name__ == "__main__":
from pyspark.pandas.tests.data_type_ops.test_num_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_8_clustering/k_means_cluster.py | 1 | 4196 | #!/usr/bin/python
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
    ### plot each cluster with a different color--add more colors for
    ### drawing more than 5 clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("C:/Vindico/Projects/Code/Python/Python/Course/Udacity/Intro to Machine Learning/ud120-projects-master/final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
feature_3 = "total_payments"
poi = "poi"
features_list = [poi, feature_1, feature_2,feature_3]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, line below assumes 2 features)
for f1, f2, f3 in finance_features:
    plt.scatter(f1, f2, f3)
plt.show()
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2,feature_3]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf", f1_name=feature_1, f2_name=feature_2)
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print "no predictions object named pred found, no clusters to plot"
# Find exercised_stock_options
import operator
stock = {k: v['exercised_stock_options'] for k, v in data_dict.items() if v['exercised_stock_options'] != 'NaN'}
# Maximum exercised_stock_options
maxval = max(stock.iteritems(), key=operator.itemgetter(1))[1]
max_stock = {k: v for k,v in stock.items() if v==maxval}
max_stock
# Minimum exercised_stock_options
minval = min(stock.iteritems(), key=operator.itemgetter(1))[1]
min_stock = {k: v for k,v in stock.items() if v==minval}
min_stock
# Stocks
ex_stok = []
for users in data_dict:
val = data_dict[users]["exercised_stock_options"]
if val == 'NaN':
continue
ex_stok.append(val)
print max(ex_stok)
print min(ex_stok)
# Find salary
salary = {k: v['salary'] for k, v in data_dict.items() if v['salary'] != 'NaN'}
# Maximum salary
maxval = max(salary.iteritems(), key=operator.itemgetter(1))[1]
max_salary = {k: v for k,v in salary.items() if v==maxval}
max_salary
# Minimum salary
minval = min(salary.iteritems(), key=operator.itemgetter(1))[1]
min_salary = {k: v for k,v in salary.items() if v==minval}
min_salary
# Salary
salary = []
for users in data_dict:
val = data_dict[users]["salary"]
if val == 'NaN':
continue
salary.append(val)
print max(salary)
print min(salary)
#{k: v['salary'] for k, v in data_dict.items() if v['salary'] != 'NaN' and v['salary'] < 4000}
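# A min-max rescaling sketch for these features (illustrative only; it assumes
# sklearn's MinMaxScaler and reuses the `salary` list built above):
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# scaled_salary = scaler.fit_transform(numpy.array(salary, dtype=float).reshape(-1, 1))
# which maps each value x to (x - min(salary)) / (max(salary) - min(salary)).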
| mit |
bbengfort/inigo | inigo/utils/stats.py | 1 | 3681 | # inigo.utils.stats
# Objects for computing Statistics and probabilities
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu Nov 13 13:47:15 2014 -0500
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: stats.py [] benjamin@bengfort.com $
"""
Objects for computing Statistics and probabilities
"""
##########################################################################
## Imports
##########################################################################
import json
from itertools import islice
from collections import Counter
##########################################################################
## Frequency Distribution
##########################################################################
class FreqDist(Counter):
"""
    Based on NLTK's FreqDist - this records the number of times each
    outcome of an experiment has occurred. Useful for tracking metrics.
"""
@classmethod
def load(klass, stream):
"""
Load a FreqDist from a dump on disk
"""
data = json.load(stream)
dist = klass()
for sample, count in data.items():
dist[sample] = count
return dist
def N(self):
"""
The total number of samples that have been recorded. For unique
samples with counts greater than zero, use B.
"""
return sum(self.values())
def B(self):
"""
Return the number of sample values or bins that have counts > 0.
"""
return len(self)
def freq(self, key):
"""
Returns the frequency of a sample defined as the count of the
sample divided by the total number of outcomes. Frequencies are
always real numbers in the range [0,1].
"""
if self.N() == 0: return 0
return float(self[key]) / self.N()
def ratio(self, a, b):
"""
Returns the ratio of two sample counts as a float.
"""
if b not in self: return 0
return float(self[a]) / float(self[b])
def max(self):
"""
Return the sample with the greatest number of outcomes.
"""
if len(self) == 0: return None
return self.most_common(1)[0][0]
def plot(self, *args, **kwargs):
"""
        Plot the samples from the frequency distribution. Requires matplotlib (pylab).
"""
try:
import pylab
except ImportError:
raise ValueError("The plot function requires matplotlib.")
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def dump(self, stream):
"""
Dump the collection to a JSON file on disk
"""
json.dump(self, stream)
def __repr__(self):
return self.pprint()
def pprint(self, maxlen=10):
items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
if len(self) > maxlen:
items.append('...')
return 'FreqDist({{{0}}})'.format(', '.join(items))
def __str__(self):
return "<FreqDist with %i samples and %i outcomes>" % (self.B(), self.N())
| mit |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/animation/bayes_update.py | 7 | 1478 | # update a distribution based on new data.
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
from matplotlib.animation import FuncAnimation
class UpdateDist(object):
def __init__(self, ax, prob=0.5):
self.success = 0
self.prob = prob
self.line, = ax.plot([], [], 'k-')
self.x = np.linspace(0, 1, 200)
self.ax = ax
# Set up plot parameters
self.ax.set_xlim(0, 1)
self.ax.set_ylim(0, 15)
self.ax.grid(True)
# This vertical line represents the theoretical value, to
# which the plotted distribution should converge.
self.ax.axvline(prob, linestyle='--', color='black')
def init(self):
self.success = 0
self.line.set_data([], [])
return self.line,
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i == 0:
return self.init()
        # Count a success when a uniform random draw falls below the success probability
if np.random.rand(1,) < self.prob:
self.success += 1
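        # With a uniform Beta(1, 1) prior, the posterior over the success
        # probability after `self.success` hits in i draws is
        # Beta(success + 1, (i - success) + 1), which is the density plotted below.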
y = ss.beta.pdf(self.x, self.success + 1, (i - self.success) + 1)
self.line.set_data(self.x, y)
return self.line,
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = UpdateDist(ax, prob=0.7)
anim = FuncAnimation(fig, ud, frames=np.arange(100), init_func=ud.init,
interval=100, blit=True)
plt.show()
| mit |
ypkang/Dato-Core | src/unity/python/graphlab/test/test_sframe.py | 13 | 108197 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
# from nose import with_setup
import graphlab as gl
from graphlab.data_structures.sframe import SFrame
from graphlab.data_structures.sarray import SArray
from graphlab.data_structures.image import Image
from graphlab.connect import main as glconnect
from graphlab.connect import server
from graphlab.util import _assert_sframe_equal
import pandas as pd
from graphlab_util.timezone import GMT
from pandas.util.testing import assert_frame_equal
import unittest
import datetime as dt
import tempfile
import os
import csv
import gzip
import util
import string
import time
import numpy as np
import array
import math
import random
import shutil
import functools
HAS_PYSPARK = True
try:
from pyspark import SparkContext, SQLContext
except:
HAS_PYSPARK = False
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
# Taken from http://stackoverflow.com/questions/1151658/python-hashable-dicts
# by Alex Martelli
class hashabledict(dict):
def __key(self):
return tuple((k,self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
class SFrameTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.a_to_z = [str(unichr(97 + i)) for i in range(0, 26)]
self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
self.int_data2 = range(50,60)
self.float_data2 = [1.0 * i for i in range(50,60)]
self.string_data2 = [str(i) for i in range(50,60)]
self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})
# Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.
self.employees_sf = SFrame()
self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name')
self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id')
self.departments_sf = SFrame()
self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id')
self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name')
def __assert_sarray_equal(self, sa1, sa2):
l1 = list(sa1)
l2 = list(sa2)
self.assertEquals(len(l1), len(l2))
for i in range(len(l1)):
v1 = l1[i]
v2 = l2[i]
if v1 == None:
self.assertEqual(v2, None)
else:
if type(v1) == dict:
self.assertEquals(len(v1), len(v2))
for key in v1:
self.assertTrue(v1.has_key(key))
self.assertEqual(v1[key], v2[key])
elif (hasattr(v1, "__iter__")):
self.assertEquals(len(v1), len(v2))
for j in range(len(v1)):
t1 = v1[j]; t2 = v2[j]
if (type(t1) == float):
if (math.isnan(t1)):
self.assertTrue(math.isnan(t2))
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(v1, v2)
def test_split_datetime(self):
from_zone = GMT(0)
to_zone = GMT(4.5)
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
central = utc.astimezone(to_zone)
sa = SArray([utc,central])
expected = SFrame()
expected ['X.year'] = [2011,2011]
expected ['X.month'] = [1,1]
expected ['X.day'] = [21,21]
expected ['X.hour'] = [2,7]
expected ['X.minute'] = [37,7]
expected ['X.second'] = [21,21]
expected ['X.tzone'] = [0.0,4.5]
result = sa.split_datetime(tzone=True)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column names
expected = SFrame()
expected ['ttt.year'] = [2011,2011]
expected ['ttt.minute'] = [37,7]
expected ['ttt.second'] = [21,21]
result = sa.split_datetime(column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sf = SFrame({'datetime': sa})
result = sf.split_datetime('datetime', column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
def __test_equal(self, sf, df):
self.assertEquals(sf.num_rows(), df.shape[0])
self.assertEquals(sf.num_cols(), df.shape[1])
assert_frame_equal(sf.to_dataframe(), df)
def __create_test_df(self, size):
int_data = []
float_data = []
string_data = []
for i in range(0,size):
int_data.append(i)
float_data.append(float(i))
string_data.append(str(i))
return pd.DataFrame({'int_data': int_data,
'float_data': float_data,
'string_data': string_data})
# Test if the rows are all the same...row order does not matter.
# (I do expect column order to be the same)
def __assert_join_results_equal(self, sf, expected_sf):
_assert_sframe_equal(sf, expected_sf, check_row_order=False)
def test_creation_from_dataframe(self):
# created from empty dataframe
sf_empty = SFrame(data=pd.DataFrame())
self.__test_equal(sf_empty, pd.DataFrame())
sf = SFrame(data=self.dataframe, format='dataframe')
self.__test_equal(sf, self.dataframe)
sf = SFrame(data=self.dataframe, format='auto')
self.__test_equal(sf, self.dataframe)
original_p = pd.DataFrame({'a':[1.0, float('nan')]})
effective_p = pd.DataFrame({'a':[1.0, None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
original_p = pd.DataFrame({'a':['a',None,'b',float('nan')]})
effective_p = pd.DataFrame({'a':['a',None,'b',None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
def test_auto_parse_csv(self):
with tempfile.NamedTemporaryFile(delete=False) as csvfile:
df = pd.DataFrame({'float_data': self.float_data,
'int_data': self.int_data,
'string_data': self.a_to_z[:len(self.int_data)]})
df.to_csv(csvfile, index=False)
csvfile.close()
sf = SFrame.read_csv(csvfile.name, header=True)
self.assertEqual(sf.dtype(), [float, int, str])
self.__test_equal(sf, df)
def test_parse_csv(self):
with tempfile.NamedTemporaryFile(delete=False) as csvfile:
self.dataframe.to_csv(csvfile, index=False)
csvfile.close()
# list type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints=[int, int, str])
self.assertEqual(sf.dtype(), [int, int, str])
sf['int_data'] = sf['int_data'].astype(int)
sf['float_data'] = sf['float_data'].astype(float)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
# list type hints, incorrect number of columns
self.assertRaises(RuntimeError,
lambda: SFrame.read_csv(csvfile.name,
column_type_hints=[int, float]))
# dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'int_data': int,
'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# single value type hints
sf = SFrame.read_csv(csvfile.name, column_type_hints=str)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.__test_equal(sf, all_string_column_df)
# single value type hints row limit
sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.assertEqual(len(sf), 5)
self.__test_equal(sf, all_string_column_df[0:len(sf)])
sf = SFrame.read_csv(csvfile.name)
sf2 = SFrame(csvfile.name, format='csv')
self.__test_equal(sf2, sf.to_dataframe())
f = open(csvfile.name, "w")
f.write('a,b,c\n')
f.write('NA,PIKA,CHU\n')
f.write('1.0,2,3\n')
f.close()
# Default type hints, all column will be string type
sf = SFrame.read_csv(csvfile.name,
na_values=['NA','PIKA','CHU'],
column_type_hints={'a':float,'b':int})
t = list(sf['a'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 1.0)
t = list(sf['b'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 2)
t = list(sf['c'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], "3")
def test_save_load_file_cleanup(self):
# when some file is in use, file should not be deleted
with util.TempDirectory() as f:
sf = SFrame()
sf['a'] = SArray(range(1,1000000))
sf.save(f)
# many for each sarray, 1 sframe_idx, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# sf1 now references the on disk file
sf1 = SFrame(f);
# create another SFrame and save to the same location
sf2 = SFrame()
sf2['b'] = SArray([str(i) for i in range(1,100000)])
sf2['c'] = SArray(range(1, 100000))
sf2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# now sf1 should still be accessible
self.__test_equal(sf1, sf.to_dataframe())
# and sf2 is correct too
sf3 = SFrame(f)
self.__test_equal(sf3, sf2.to_dataframe())
# when sf1 goes out of scope, the tmp files should be gone
sf1 = 1
time.sleep(1) # give time for the files being deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
def test_save_load(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f)
sf2 = gl.load_sframe(f)
self.__test_equal(sf2, self.dataframe)
# Check individual formats with the SFrame constructor
formats = ['.csv']
for suffix in formats:
with tempfile.NamedTemporaryFile(suffix=suffix) as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name)
sf2 = SFrame(f.name)
sf2['int_data'] = sf2['int_data'].astype(int)
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])
g2=SFrame()
g2['x']=g
g2.save(f.name)
g3=gl.SFrame.read_csv(f.name,column_type_hints=list)
self.__test_equal(g2, g3.to_dataframe())
# Make sure this file don't exist before testing
self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))
# Bad permission
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sf.save(os.path.join(test_dir, 'bad.frame_idx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sf2 = SFrame(os.path.join(test_dir, 'bad.frame_idx'))
# cleanup
os.removedirs(test_dir)
del sf2
def test_save_to_csv(self):
with tempfile.NamedTemporaryFile(suffix='csv', delete=True) as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='csv')
sf2 = SFrame.read_csv(f.name + '.csv', column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf2, self.dataframe)
def _remove_sframe_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
os.remove(f)
def test_creation_from_csv_on_server(self):
# create from 'remote' csv url
with tempfile.NamedTemporaryFile(suffix='.csv') as csvfile:
basesf = SFrame(self.dataframe)
basesf.save(csvfile.name, format="csv")
# Read csv giving type hints
#sf = SFrame(data='remote://' + csvfile.name, format='csv',
# column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
sf = SFrame.read_csv('remote://' + csvfile.name,
column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf, self.dataframe)
# Read csv without giving type hints, all column will be string type
sf = SFrame(data='remote://' + csvfile.name, format='csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.__test_equal(sf, self.dataframe)
def test_creation_from_txt(self):
with tempfile.NamedTemporaryFile(suffix='.txt') as f:
df = self.dataframe[['string_data']]
df.to_csv(f.name, index=False)
sf = SFrame(f.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
with tempfile.NamedTemporaryFile(suffix='.txt.gz') as fgzip:
f_in = open(f.name, 'rb')
f_out = gzip.open(fgzip.name, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
sf = SFrame(fgzip.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
def test_creation_from_csv_on_local(self):
if (isinstance(glconnect.get_server(), server.LocalServer)):
if os.path.exists('./foo.csv'):
os.remove('./foo.csv')
with open('./foo.csv', 'w') as f:
url = f.name
basesf = SFrame(self.dataframe)
basesf.save(url, format="csv")
f.close()
sf = SFrame('./foo.csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.__test_equal(sf, self.dataframe)
sf = SFrame(url)
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.__test_equal(sf, self.dataframe)
os.remove(url)
# test Windows line endings
if os.path.exists('./windows_lines.csv'):
os.remove('./windows_lines.csv')
windows_file_url = None
with open('./windows_lines.csv', 'w') as f:
windows_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(windows_file_url)
def test_creation_from_csv_on_http(self):
pass
# sf = SFrame(data=self.url, use_header=False)
# self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))
def test_creation_from_csv_on_s3(self):
# Requires s3 account for jenkins
# sf = SFrame(data='s3://graphlab-testdata/foo.csv')
# print sf.head(sf.num_rows())
pass
def test_creation_from_csv_dir_local(self):
csv_dir = "./csv_dir"
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.mkdir(csv_dir)
for i in range(0, 100):
with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:
url = f.name
self.dataframe.to_csv(url, index=False)
f.close()
singleton_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.0.csv"))
self.assertEquals(singleton_sf.num_rows(), 10)
many_sf = SFrame.read_csv(csv_dir)
self.assertEquals(many_sf.num_rows(), 1000)
glob_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.*2.csv"))
self.assertEquals(glob_sf.num_rows(), 100)
with self.assertRaises(RuntimeError):
SFrame.read_csv("missingdirectory")
with self.assertRaises(ValueError):
SFrame.read_csv("")
shutil.rmtree(csv_dir)
def test_creation_from_iterable(self):
# Normal dict of lists
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
df = pd.DataFrame(the_dict)
self.__test_equal(sf, df)
# Test that a missing value does not change the data type
the_dict['ints'][0] = None
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), int)
# numpy.nan is actually a float, so it should cast the column to float
the_dict['ints'][0] = np.nan
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), float)
# Just a single list
sf = SFrame(self.int_data)
df = pd.DataFrame(self.int_data)
df.columns = ['X1']
self.__test_equal(sf, df)
# Normal list of lists
list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]
sf = SFrame(list_of_lists)
cntr = 0
for i in sf:
self.assertEquals(list_of_lists[cntr], list(i['X1']))
cntr += 1
self.assertEquals(sf.num_columns(), 1)
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})
df = pd.DataFrame(the_dict)
self.__test_equal(sf2, df)
sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])
self.assertEquals(['X1','X2','X3'],sf2.column_names())
sf2.rename({'X1':'ints','X2':'floats','X3':'strings'})
sf2=sf2[['floats','ints','strings']]
self.__test_equal(sf2, df)
def test_head_tail(self):
sf = SFrame(data=self.dataframe)
assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))
# Cannot test for equality the same way because of dataframe indices
taildf = sf.tail(4)
for i in range(0, 4):
self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])
self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])
self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])
def test_head_tail_edge_case(self):
sf = SFrame()
self.assertEquals(sf.head().num_columns(), 0)
self.assertEquals(sf.tail().num_columns(), 0)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
sf = SFrame()
sf['a'] = []
self.assertEquals(sf.head().num_columns(), 1)
self.assertEquals(sf.tail().num_columns(), 1)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
def test_transform(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname], sf.column_types()[i])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
def test_transform_with_type_inference(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'])
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
# SFrame apply returns list of vector of numeric should be vector, not list
sa = sf.apply(lambda x: [x['int_data'], x['float_data']])
self.assertEqual(sa.dtype(), array.array);
def test_transform_with_exception(self):
sf = SFrame(data=self.dataframe)
self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key
self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str
self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error
self.assertRaises(IndexError, lambda: sf.apply(lambda x: x.values()[10])) # index out of bound error
def test_empty_transform(self):
sf = SFrame()
b = sf.apply(lambda x:x)
self.assertEquals(len(b.head()), 0)
def test_flatmap(self):
# Correctness of typical usage
n = 10
sf = SFrame({'id': range(n)})
new_sf = sf.flat_map(["id_range"], lambda x: [[str(i)] for i in range(x['id'])])
self.assertEqual(new_sf.column_names(), ["id_range"])
self.assertEqual(new_sf.column_types(), [str])
expected_col = [str(x) for i in range(n) for x in range(i)]
self.assertListEqual(list(new_sf['id_range']), expected_col)
# Empty SFrame, without explicit column types
sf = gl.SFrame()
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id_range'],
lambda x: [[i] for i in range(x['id'])])
# Empty rows successfully removed
sf = gl.SFrame({'id': range(15)})
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 8 else [])
self.assertEqual(new_sf.num_rows(), 6)
        # If the first ten rows are all empty, a TypeError is raised
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 9 else [])
def test_select_column(self):
sf = SFrame(data=self.dataframe)
sub_sf = sf.select_columns(['int_data', 'string_data'])
exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})
self.__test_equal(sub_sf, exp_df)
with self.assertRaises(ValueError):
sf.select_columns(['int_data', 'string_data', 'int_data'])
# test indexing
sub_col = sf['float_data']
self.assertEqual(sub_col.head(10), self.float_data)
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(['duh',1])
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(0)
with self.assertRaises(RuntimeError):
sub_sf = sf.select_columns(['not_a_column'])
sf = SFrame()
with self.assertRaises(RuntimeError):
sf.select_column('x')
with self.assertRaises(RuntimeError):
sf.select_columns(['x'])
sf.add_column(gl.SArray(), 'x')
# does not throw
sf.select_column('x')
sf.select_columns(['x'])
with self.assertRaises(RuntimeError):
sf.select_column('y')
with self.assertRaises(RuntimeError):
sf.select_columns(['y'])
def test_topk(self):
sf = SFrame(data=self.dataframe)
# Test that order is preserved
df2 = sf.topk('int_data').to_dataframe()
df2_expected = self.dataframe.sort('int_data', ascending=False)
df2_expected.index = range(df2.shape[0])
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('float_data', 3).to_dataframe()
df2_expected = self.dataframe.sort('float_data', ascending=False).head(3)
df2_expected.index = range(3)
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('string_data', 3).to_dataframe()
for i in range(0, 3):
self.assertEqual(df2['int_data'][2-i], i + 7)
with self.assertRaises(TypeError):
sf.topk(2,3)
sf = SFrame()
sf.add_column(SArray([1,2,3,4,5]), 'a')
sf.add_column(SArray([1,2,3,4,5]), 'b')
sf.topk('a', 1) # should not fail
def test_filter(self):
sf = SFrame(data=self.dataframe)
filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])
sf2 = sf[filter_sa]
exp_df = sf.head(3).append(sf.tail(3))
self.__test_equal(sf2, exp_df.to_dataframe())
# filter by 1s
sf2 = sf[SArray(self.int_data)]
exp_df = sf.head(10).to_dataframe()
self.__test_equal(sf2, exp_df)
# filter by 0s
sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]
exp_df = sf.head(0).to_dataframe()
self.__test_equal(sf2, exp_df)
# wrong size
with self.assertRaises(IndexError):
sf2 = sf[SArray([0,1,205])]
# slightly bigger size
sf = gl.SFrame()
n = 1000000
sf['a'] = range(n)
result = sf[sf['a'] == -1]
self.assertEquals(len(result), 0)
result = sf[sf['a'] > n - 123]
self.assertEquals(len(result), 122)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i + n - 122, l[i])
result = sf[sf['a'] < 2000]
self.assertEquals(len(result), 2000)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i, l[i])
def test_sample_split(self):
sf = SFrame(data=self.__create_test_df(100))
entry_list = set()
for i in sf:
entry_list.add(str(i))
sample_sf = sf.sample(.12, 9)
sample_sf2 = sf.sample(.12, 9)
self.assertEqual(len(sample_sf), len(sample_sf2))
assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())
for i in sample_sf:
self.assertTrue(str(i) in entry_list)
with self.assertRaises(ValueError):
sf.sample(3)
sample_sf = SFrame().sample(.12, 9)
self.assertEqual(len(sample_sf), 0)
a_split = sf.random_split(.12, 9)
first_split_entries = set()
for i in a_split[0]:
first_split_entries.add(str(i))
for i in a_split[1]:
self.assertTrue(str(i) in entry_list)
self.assertTrue(str(i) not in first_split_entries)
with self.assertRaises(ValueError):
sf.random_split(3)
self.assertEqual(len(SFrame().random_split(.4)[0]), 0)
self.assertEqual(len(SFrame().random_split(.4)[1]), 0)
# tests add_column, rename
def test_edit_column_ops(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
# Make sure auto names work
names = sf.column_names()
cntr = 1
for i in names:
self.assertEquals("X"+str(cntr), i)
cntr = cntr + 1
# Remove a column
del sf['X2']
# names
names = sf.column_names()
self.assertEquals(len(names), 2)
self.assertEquals('X1', names[0])
self.assertEquals('X3', names[1])
# check content
self.assertEquals(sf['X1'].head(10), self.int_data)
self.assertEquals(sf['X3'].head(10), self.string_data)
# check that a new automatically named column will not conflict
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
if len(uniq_set) == 1:
self.assertEquals(list(sf[i].head(10)), self.int_data)
else:
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), 3)
# replacing columns preserves order
names = sf.column_names()
for n in names:
sf[n] = sf[n].apply(lambda x: x)
self.assertEquals(sf.column_names(), names)
# do it again!
del sf['X1']
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), len(names))
# standard rename
rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}
sf.rename(rename_dict)
self.assertEquals(sf.column_names(), ['data','more_data','even_more'])
# rename a column to a name that's already taken
with self.assertRaises(RuntimeError):
sf.rename({'data':'more_data'})
# try to rename a column that doesn't exist
with self.assertRaises(ValueError):
sf.rename({'foo':'bar'})
# pass something other than a dict
with self.assertRaises(TypeError):
sf.rename('foo')
# Setting a column to const preserves order
names = sf.column_names()
for n in names:
sf[n] = 1
self.assertEquals(sf.column_names(), names)
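# Illustrative usage sketch, based on the naming and renaming behaviour exercised above:
#   tmp = SFrame()
#   tmp.add_column(SArray([1, 2, 3]))   # auto-named 'X1'
#   tmp.rename({'X1': 'values'})        # rename takes an {old_name: new_name} dict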
def test_remove_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
sf2 = sf.remove_column('X3')
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])
sf2 = sf.remove_columns(['X2', 'X5'])
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X4'])
# with a generator expression
sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()))
assert sf is sf2
self.assertEquals(sf.column_names(), ['X4'])
def test_remove_bad_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_column('bad'))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4']))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
def __generate_synthetic_sframe__(self, num_users):
"""
synthetic collaborative data.
generate num_users users; user i watched movies 1 .. i.
rating(i, j) = i + j
length(i, j) = i - j
"""
sf = SFrame()
sparse_matrix = {}
for i in range(1, num_users + 1):
sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]
user_ids = []
movie_ids = []
ratings = []
length_of_watching = []
for u in sparse_matrix:
user_ids += [u] * len(sparse_matrix[u])
movie_ids += [x[0] for x in sparse_matrix[u]]
ratings += [x[1] for x in sparse_matrix[u]]
length_of_watching += [x[2] for x in sparse_matrix[u]]
# typical add column stuff
sf['user_id'] = (SArray(user_ids, int))
sf['movie_id'] = (SArray(movie_ids, str))
sf['rating'] = (SArray(ratings, float))
sf['length'] = (SArray(length_of_watching, int))
return sf
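# For reference, with num_users=3 the helper above produces:
#   user 1: (movie '1', rating 2, length 0)
#   user 2: (movie '1', rating 3, length 1), (movie '2', rating 4, length 0)
#   user 3: (movie '1', rating 4, length 2), (movie '2', rating 5, length 1), (movie '3', rating 6, length 0)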
def test_aggregate_ops(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] for y in range(m)]
sf = gl.SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf.__materialize__()
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('value'),
gl.aggregate.AVG('value'), gl.aggregate.MIN('value'), gl.aggregate.MAX('value'),
gl.aggregate.VAR('value'), gl.aggregate.STDV('value'), gl.aggregate.SUM('vector_values'), gl.aggregate.MEAN('vector_values')]
sf2 = sf.groupby('key', built_ins)
self.assertEqual(sf2['Count'], m)
self.assertEqual(sf2['Sum of value'], sum(values))
self.assertEqual(sf2['Avg of value'], np.mean(values))
self.assertEqual(sf2['Min of value'], min(values))
self.assertEqual(sf2['Max of value'], max(values))
self.assertEqual(sf2['Var of value'], np.var(values))
self.assertEqual(sf2['Stdv of value'], np.std(values))
# For vectors
self.assertEqual(sf2['Vector Sum of vector_values'], np.sum(vector_values, axis=0))
self.assertEqual(sf2['Vector Avg of vector_values'], np.mean(vector_values, axis=0))
def test_aggregate_ops_on_lazy_frame(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] for y in range(m)]
sf = gl.SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf['value'] = sf['value'] + 0
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('value'),
gl.aggregate.AVG('value'), gl.aggregate.MIN('value'), gl.aggregate.MAX('value'),
gl.aggregate.VAR('value'), gl.aggregate.STDV('value'), gl.aggregate.SUM('vector_values'), gl.aggregate.MEAN('vector_values')]
sf2 = sf.groupby('key', built_ins)
self.assertEqual(sf2['Count'], m)
self.assertEqual(sf2['Sum of value'], sum(values))
self.assertEqual(sf2['Avg of value'], np.mean(values))
self.assertEqual(sf2['Min of value'], min(values))
self.assertEqual(sf2['Max of value'], max(values))
self.assertEqual(sf2['Var of value'], np.var(values))
self.assertEqual(sf2['Stdv of value'], np.std(values))
self.assertEqual(sf2['Vector Sum of vector_values'], np.sum(vector_values, axis=0))
self.assertEqual(sf2['Vector Avg of vector_values'], np.mean(vector_values, axis=0))
def test_aggregate_ops2(self):
"""
Test builtin groupby aggregators using explicit named columns
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] for y in range(m)]
sf = gl.SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
built_ins = {'count':gl.aggregate.COUNT, 'sum':gl.aggregate.SUM('value'),
'avg':gl.aggregate.AVG('value'),
'avg2':gl.aggregate.MEAN('value'), 'min':gl.aggregate.MIN('value'), 'max':gl.aggregate.MAX('value'),
'var':gl.aggregate.VAR('value'), 'var2':gl.aggregate.VARIANCE('value'),
'stdv':gl.aggregate.STD('value'), 'stdv2':gl.aggregate.STDV('value'),'vector_sum': gl.aggregate.SUM('vector_values'),'vector_mean': gl.aggregate.MEAN('vector_values')}
sf2 = sf.groupby('key', built_ins)
self.assertEqual(sf2['count'], m)
self.assertEqual(sf2['sum'], sum(values))
self.assertEqual(sf2['avg'], np.mean(values))
self.assertEqual(sf2['avg2'], np.mean(values))
self.assertEqual(sf2['min'], min(values))
self.assertEqual(sf2['max'], max(values))
self.assertEqual(sf2['var'], np.var(values))
self.assertEqual(sf2['var2'], np.var(values))
self.assertEqual(sf2['stdv'], np.std(values))
self.assertEqual(sf2['stdv2'], np.std(values))
self.assertEqual(sf2['vector_sum'], np.sum(vector_values, axis=0))
self.assertEqual(sf2['vector_mean'], np.mean(vector_values, axis=0))
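# Illustrative sketch of the dict form exercised above, which names the output columns explicitly:
#   sf.groupby('key', {'n': gl.aggregate.COUNT, 'total': gl.aggregate.SUM('value')})
#   # expected to yield the columns ['key', 'n', 'total']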
def test_groupby(self):
"""
Test builtin groupby and aggregate on different column types
"""
num_users = 500
sf = self.__generate_synthetic_sframe__(num_users=num_users)
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('rating'),
gl.aggregate.AVG('rating'), gl.aggregate.MIN('rating'), gl.aggregate.MAX('rating'),
gl.aggregate.VAR('rating'), gl.aggregate.STDV('rating')]
built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']
"""
Test groupby user_id and aggregate on rating
"""
sf_user_rating = sf.groupby('user_id', built_ins)
actual = sf_user_rating.column_names()
expected = ['%s of rating' % v for v in built_in_names] + ['user_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_user_rating:
uid = row['user_id']
mids = range(1, uid + 1)
ratings = [uid + i for i in mids]
expected = [len(ratings), sum(ratings), np.mean(ratings), min(ratings), max(ratings), np.var(ratings), np.sqrt(np.var(ratings))]
actual = [row['Count']] + [row['%s of rating' % op] for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
"""
Test that count can be applied on empty aggregate column.
"""
sf_user_rating = sf.groupby("user_id", {'counter': gl.aggregate.COUNT()})
actual = {x['user_id']: x['counter'] for x in sf_user_rating}
expected = {i: i for i in range(1, num_users + 1)}
self.assertDictEqual(actual, expected)
"""
Test groupby movie_id and aggregate on length_of_watching
"""
built_ins = [gl.aggregate.COUNT(), gl.aggregate.SUM('length'),
gl.aggregate.AVG('length'), gl.aggregate.MIN('length'), gl.aggregate.MAX('length'),
gl.aggregate.VAR('length'), gl.aggregate.STDV('length')]
sf_movie_length = sf.groupby('movie_id', built_ins)
actual = sf_movie_length.column_names()
expected = ['%s of length' % v for v in built_in_names] + ['movie_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_movie_length:
mid = row['movie_id']
uids = range(int(mid), num_users + 1)
values = [i - int(mid) for i in uids]
expected = [len(values), sum(values), np.mean(values), min(values), max(values), np.var(values), np.std(values)]
actual = [row['Count']] + [row['%s of length' % op] for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
def test_quantile_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
# max and min rating for each user
g = sf.groupby('user_id', [gl.aggregate.MIN('rating'),
gl.aggregate.MAX('rating'),
gl.aggregate.QUANTILE('rating', 0, 1)])
self.assertEquals(len(g), 500)
for row in g:
minrating = row['Min of rating']
maxrating = row['Max of rating']
arr = list(row['Quantiles of rating'])
self.assertEquals(len(arr), 2)
self.assertEquals(arr[0], minrating)
self.assertEquals(arr[1], maxrating)
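# Note: QUANTILE('rating', 0, 1) requests the 0th and 100th percentiles, so each
# 'Quantiles of rating' vector is expected to hold [min, max] for its group.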
def test_argmax_argmin_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_ret = sf.groupby('user_id', {'movie with max rating':gl.aggregate.ARGMAX('rating','movie_id'),
'movie with min rating':gl.aggregate.ARGMIN('rating','movie_id')})
self.assertEquals(len(sf_ret), 500)
self.assertEqual(sf_ret["movie with max rating"].dtype(), str)
self.assertEqual(sf_ret["movie with min rating"].dtype(), str)
self.assertEqual(sf_ret["user_id"].dtype(), int)
# make sure we have computed correctly.
max_d = {}
min_d = {}
for i in sf:
key = i['user_id']
if key not in max_d:
max_d[key] = (i['movie_id'],i['rating'])
min_d[key] = (i['movie_id'],i['rating'])
else:
if max_d[key][1] < i['rating']:
max_d[key] = (i['movie_id'],i['rating'])
if min_d[key][1] > i['rating']:
min_d[key] = (i['movie_id'],i['rating'])
for i in sf_ret:
key = i['user_id']
self.assertEqual(i["movie with max rating"],max_d[key][0])
self.assertEqual(i["movie with min rating"],min_d[key][0])
def test_multicolumn_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_um = sf.groupby(["user_id", "movie_id"], gl.aggregate.COUNT)
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
sf_um = sf.groupby(["movie_id", "user_id"], gl.aggregate.COUNT())
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
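# Illustrative sketch of the multi-key form exercised above:
#   sf.groupby(['user_id', 'movie_id'], gl.aggregate.COUNT)
#   # expected to add a 'Count' column with the number of rows per (user_id, movie_id) pair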
def __assert_concat_result_equal(self, result, expected, list_columns):
self.assertEqual(result.num_columns(), expected.num_columns())
for column in result.column_names():
c1 = result[column]
c2 = expected[column]
self.assertEqual(c1.dtype(), c2.dtype())
self.assertEqual(c1.size(), c2.size())
if (column in list_columns):
for i in range(len(c1)):
if (c1[i] == None):
self.assertTrue(c2[i] == None)
continue
if (c1.dtype() == dict):
for k in c1[i]:
self.assertEqual(c2[i][k], c1[i][k])
else:
s1 = list(c1[i]);
if s1 != None: s1.sort()
s2 = list(c2[i]);
if s2 != None: s2.sort()
self.assertEqual(s1, s2)
else:
self.assertEqual(list(c1),list(c2))
def test_groupby_dict_key(self):
t = gl.SFrame({'a':[{1:2},{3:4}]})
with self.assertRaises(TypeError):
t.groupby('a', {})
def test_concat(self):
sf = SFrame()
sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]
sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]
sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']
sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]
result = sf.groupby('a', gl.aggregate.CONCAT('b'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of b': [[1,1,2,2],[1,3,3],[4],[2], []]
})
expected_result['List of b'] = expected_result['List of b'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])
result = sf.groupby('a', gl.aggregate.CONCAT('d'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])
result = sf.groupby('a', {'c_c' :gl.aggregate.CONCAT('c')})
expected_result = SFrame({
'a': [1,2,3,4, 5],
'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])
result = sf.groupby('a', gl.aggregate.CONCAT('b','c'))
expected_result = SFrame({
'a': [1,2,3,4,5],
'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])
result = sf.groupby('a', {'c_b':gl.aggregate.CONCAT('c','b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])
result = sf.groupby('a', {'cs':gl.aggregate.CONCAT('c'), 'bs':gl.aggregate.CONCAT('b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'bs': [[1,1,2,2],[1,3,3],[4],[2], []],
'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
expected_result['bs'] = expected_result['bs'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])
# exception: fails if no column is given
with self.assertRaises(TypeError):
sf.groupby('a', gl.aggregate.CONCAT())
with self.assertRaises(KeyError):
sf.groupby('a', gl.aggregate.CONCAT('nonexist'))
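# Illustrative sketch of the CONCAT forms exercised above:
#   sf.groupby('a', gl.aggregate.CONCAT('b'))        # one list of b-values per group
#   sf.groupby('a', gl.aggregate.CONCAT('b', 'c'))   # one {b: c} dict per group; pairs whose key is missing are skipped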
def test_select_one(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
res = list(sf.groupby('a', {'b':gl.aggregate.SELECT_ONE('b')}))
self.assertEqual(len(res), 5)
for i in res:
self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)
def test_unique(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
self.assertEqual(len(sf.unique()), 10)
vals = [1,1,2,2,3,3,4,4, None, None]
sf = SFrame({'a':vals,'b':vals})
res = sf.unique()
self.assertEqual(len(res), 5)
self.assertEqual(sorted(list(res['a'])), sorted([1,2,3,4,None]))
self.assertEqual(sorted(list(res['b'])), sorted([1,2,3,4,None]))
def test_append_all_match(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_lazy(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
self.assertTrue(new_sf.__is_materialized__())
filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])
filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])
new_sf1 = new_sf[filter_sf1]
new_sf2 = new_sf[filter_sf2]
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())
assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())
row = sf1.head(1)
sf = SFrame()
for i in range(10):
sf = sf.append(row)
df = sf.to_dataframe()
for i in range(10):
self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))
def test_recursive_append(self):
sf = SFrame()
for i in range(200):
sf = sf.append(SFrame(data = self.dataframe))
#consume
sf.__materialize__()
def test_print_sframe(self):
sf = SFrame()
def _test_print():
sf.__repr__()
sf._repr_html_()
sf.print_rows()
n = 20
sf['int'] = [i for i in range(n)]
sf['float'] = [float(i) for i in range(n)]
sf['str'] = [str(i) for i in range(n)]
uc = '\xe5\xa4\xa7\xe5\xa4\xb4' # dato pronounced in chinese, big head
sf['unicode'] = [uc for i in range(n)]
sf['array'] = [array.array('d', [i]) for i in range(n)]
sf['list'] = [[i, float(i), [i]] for i in range(n)]
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
sf['dt'] = [utc for i in range(n)]
sf['img'] = [Image() for i in range(n)]
sf['long_str'] = ["".join([str(i)] * 50) for i in range(n)]
sf['long_unicode'] = ["".join([uc] * 50) for i in range(n)]
sf['bad_unicode'] = ['\x9d' + uc for i in range(n)]
_test_print()
def test_print_lazy_sframe(self):
sf1 = SFrame(data=self.dataframe)
self.assertTrue(sf1.__is_materialized__())
sf2 = sf1[sf1['int_data'] > 3]
sf2.__repr__()
sf2.__str__()
self.assertFalse(sf2.__is_materialized__())
len(sf2)
self.assertTrue(sf2.__is_materialized__())
def test_append_order_diff(self):
# column names match but the column order differs
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
sf2.swap_columns('int_data', 'string_data')
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_empty_sframe(self):
sf = SFrame(data=self.dataframe)
other = SFrame()
# non empty append empty
assert_frame_equal(sf.append(other).to_dataframe(), self.dataframe)
# empty append non empty
assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)
#empty append empty
assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())
def test_append_exception(self):
sf = SFrame(data=self.dataframe)
# column count does not match
other = SFrame()
other.add_column(SArray(), "test")
self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same
# column names do not match
other = SFrame()
names = sf.column_names()
for name in sf.column_names():
other.add_column(SArray(), name)
names[0] = 'some name not match'
self.assertRaises(RuntimeError, lambda: sf.append(other))
# column names match but one column's type differs
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
# change one column type
sf1["int_data"] = sf2.select_column("int_data").astype(float)
self.assertRaises(RuntimeError, lambda: sf1.append(sf2))
def test_simple_joins(self):
inner_expected = SFrame()
inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name')
inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id')
inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name')
# Tests the "natural join" case
beg = time.time()
res = self.employees_sf.join(self.departments_sf)
end = time.time()
print "Really small join: " + str(end-beg) + " s"
self.__assert_join_results_equal(res, inner_expected)
left_join_row = SFrame()
left_join_row.add_column(SArray(['John']), 'last_name')
left_join_row.add_column(SArray([None], int), 'dep_id')
left_join_row.add_column(SArray([None], str), 'dep_name')
left_expected = inner_expected.append(left_join_row)
# Left outer join, passing string to 'on'
res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')
self.__assert_join_results_equal(res, left_expected)
right_join_row = SFrame()
right_join_row.add_column(SArray([None], str), 'last_name')
right_join_row.add_column(SArray([35]), 'dep_id')
right_join_row.add_column(SArray(['Marketing']), 'dep_name')
right_expected = inner_expected.append(right_join_row)
# Right outer join, passing list to 'on'
res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])
self.__assert_join_results_equal(res, right_expected)
outer_expected = left_expected.append(right_join_row)
# Full outer join, passing dict to 'on'
res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})
self.__assert_join_results_equal(res, outer_expected)
# Test a join on non-matching key
res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})
self.assertEquals(res.num_rows(), 0)
self.assertEquals(res.num_cols(), 3)
self.assertEquals(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])
# Test a join on a non-unique key
bad_departments = SFrame()
bad_departments['dep_id'] = SArray([33,33,31,31])
bad_departments['dep_name'] = self.departments_sf['dep_name']
no_pk_expected = SFrame()
no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])
no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])
no_pk_expected['dep_name'] = SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])
res = self.employees_sf.join(bad_departments, on='dep_id')
self.__assert_join_results_equal(res, no_pk_expected)
# Left join on non-unique key
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]
res = bad_departments.join(self.employees_sf, on='dep_id', how='left')
self.__assert_join_results_equal(res, no_pk_expected)
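# Illustrative sketch of the join forms exercised above; 'on' may be a column name,
# a list of names, or an {left_column: right_column} dict, and 'how' is one of
# 'inner' (the default), 'left', 'right' or 'outer':
#   self.employees_sf.join(self.departments_sf, on={'dep_id': 'dep_id'}, how='left')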
def test_big_composite_join(self):
# Create a semi large SFrame with composite primary key (letter, number)
letter_keys = []
number_keys = []
data = []
for i in string.ascii_lowercase:
for j in range(0,100):
letter_keys.append(i)
number_keys.append(j)
which = j % 3
if which == 0:
data.append(string.ascii_uppercase)
elif which == 1:
data.append(string.digits)
elif which == 2:
data.append(string.hexdigits)
pk_gibberish = SFrame()
pk_gibberish['letter'] = SArray(letter_keys, str)
pk_gibberish['number'] = SArray(number_keys, int)
pk_gibberish['data'] = SArray(data, str)
# Some rows that won't match
more_data = []
more_letter_keys = []
more_number_keys = []
for i in range(0,40000):
more_data.append('fish')
more_letter_keys.append('A')
more_number_keys.append(200)
for i in range(0,80):
for j in range(100,1000):
more_data.append('waffles')
more_letter_keys.append(letter_keys[j])
more_number_keys.append(number_keys[j])
# Non-matching row in this stretch
if j == 147:
more_letter_keys[-1] = 'A'
for i in range(0,5000):
more_data.append('pizza')
more_letter_keys.append('Z')
more_number_keys.append(400)
join_with_gibberish = SFrame()
join_with_gibberish['data'] = SArray(more_data, str)
join_with_gibberish['moredata'] = SArray(more_data, str)
join_with_gibberish['a_number'] = SArray(more_number_keys, int)
join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)
expected_answer = SFrame()
exp_letter = []
exp_number = []
exp_data = []
for i in range(0,80):
exp_letter.extend(letter_keys[100:147])
exp_number.extend(number_keys[100:147])
exp_letter.extend(letter_keys[148:1000])
exp_number.extend(number_keys[148:1000])
exp_data.extend(data[100:147])
exp_data.extend(data[148:1000])
expected_answer['letter'] = SArray(exp_letter, str)
expected_answer['number'] = SArray(exp_number, int)
expected_answer['data'] = SArray(exp_data, str)
expected_answer['data.1'] = 'waffles'
expected_answer['moredata'] = 'waffles'
beg = time.time()
res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})
end = time.time()
print "Join took " + str(end-beg) + " seconds"
self.__assert_join_results_equal(res, expected_answer)
def test_convert_dataframe_empty(self):
sf = SFrame()
sf['a'] = gl.SArray([], int)
df = sf.to_dataframe()
self.assertEqual(df['a'].dtype, int)
sf1 = SFrame(df)
self.assertEquals(sf1['a'].dtype(), int)
self.assertEqual(sf1.num_rows(), 0)
def test_replace_one_column(self):
sf = SFrame()
sf['a'] = [1,2,3]
self.assertEquals(sf['a'], [1,2,3])
# this should succeed: 'a' is the only column, so replacing it may change the number of rows
sf['a'] = [1,2]
self.assertEquals(sf['a'], [1,2])
# a failed replacement should leave the original SFrame unchanged
with self.assertRaises(TypeError):
sf['a'] = [1,2,'a']
self.assertEquals(sf['a'], [1,2])
# replacing a column with one of a different length should fail when there is more than one column
sf = SFrame()
sf['a'] = [1,2,3]
sf['b'] = ['a', 'b', 'c']
with self.assertRaises(RuntimeError):
sf['a'] = [1,2]
def test_filter_by(self):
# Set up SFrame to filter by
sf = SFrame()
sf.add_column(SArray(self.int_data), "ints")
sf.add_column(SArray(self.float_data), "floats")
sf.add_column(SArray(self.string_data), "strings")
# Normal cases
res = sf.filter_by(SArray(self.int_data), "ints")
self.__assert_join_results_equal(res, sf)
res = sf.filter_by(SArray(self.int_data), "ints", exclude=True)
self.assertEquals(list(res), [])
res = sf.filter_by([5,6], "ints")
exp = SFrame()
exp.add_column(SArray(self.int_data[4:6]), "ints")
exp.add_column(SArray(self.float_data[4:6]), "floats")
exp.add_column(SArray(self.string_data[4:6]), "strings")
self.__assert_join_results_equal(res, exp)
exp_opposite = SFrame()
exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), "ints")
exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), "floats")
exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), "strings")
res = sf.filter_by([5,6], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
exp_one = SFrame()
exp_one.add_column(SArray(self.int_data[4:5]), "ints")
exp_one.add_column(SArray(self.float_data[4:5]), "floats")
exp_one.add_column(SArray(self.string_data[4:5]), "strings")
exp_all_but_one = SFrame()
exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), "ints")
exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), "floats")
exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), "strings")
res = sf.filter_by(5, "ints")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
res = sf.filter_by("5", "strings")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
# Only missing values
res = sf.filter_by([77,77,88,88], "ints")
# Test against empty SFrame with correct columns/types
self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])
res = sf.filter_by([77,77,88,88], "ints", exclude=True)
self.__assert_join_results_equal(res, sf)
# Duplicate values
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Duplicate and missing
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Type mismatch
with self.assertRaises(TypeError):
res = sf.filter_by(["hi"], "ints")
# Column doesn't exist
with self.assertRaises(KeyError):
res = sf.filter_by([1,2], "intssss")
# Something that can't be turned into an SArray
with self.assertRaises(Exception):
res = sf.filter_by({1:2,3:4}, "ints")
# column_name not given as string
with self.assertRaises(TypeError):
res = sf.filter_by(1,2)
# Duplicate column names after join. Should be last because of the
# renames.
sf.rename({'ints':'id','floats':'id1','strings':'id11'})
exp.rename({'ints':'id','floats':'id1','strings':'id11'})
exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'})
res = sf.filter_by([5,6], "id")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([5,6], "id", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
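# Illustrative sketch of filter_by as exercised above: keep rows whose value in the
# named column appears in the given values, or drop them with exclude=True:
#   sf.filter_by([5, 6], 'id')
#   sf.filter_by([5, 6], 'id', exclude=True)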
def __test_to_from_dataframe(self, data, type):
sf = SFrame()
sf['a'] = data
df = sf.to_dataframe()
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
df = pd.DataFrame({'val': data})
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
def test_to_from_dataframe(self):
self.__test_to_from_dataframe([1,2,3], int)
self.__test_to_from_dataframe(['a', 'b', 'c'], str)
self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)
self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)
self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)
self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)
def test_pack_columns_exception(self):
sf = SFrame()
sf['a'] = [1, 2, 3, None, None]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
# cannot pack non-numeric values into an array
with self.assertRaises(TypeError):
sf.pack_columns(dtype=array.array)
# cannot use a non-numeric fill_na value when packing to an array
with self.assertRaises(ValueError):
sf.pack_columns(dtype=array.array, fill_na='c')
# cannot pack columns that do not exist
with self.assertRaises(ValueError):
sf.pack_columns(['d','a'])
# cannot pack less than two columns
with self.assertRaises(ValueError):
sf.pack_columns(['a'])
# dtype has to be dict/array/list
with self.assertRaises(ValueError):
sf.pack_columns(dtype=str)
# pack duplicate columns
with self.assertRaises(ValueError):
sf.pack_columns(['a','a'])
# packing a subset of columns to an array should fail for columns that are not numeric
with self.assertRaises(TypeError):
sf.pack_columns(['a','b'], dtype=array.array)
with self.assertRaises(TypeError):
sf.pack_columns(column_prefix = 1)
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = '1')
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = 'c', columns=['a', 'b'])
def test_pack_columns2(self):
from graphlab import SFrame, SArray
sf = SFrame()
sf['id'] = [1, 2, 3, 4]
sf['category.a'] = [None, '2', '3', None]
sf['category.b'] = [None, 2.0, None, 4.0]
expected = SArray([
[None, None],
['2', 2.0],
['3', None],
[None, 4.0]])
result = sf.pack_columns(column_prefix='category')
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['category'], expected)
result = sf.pack_columns(column_prefix='category', new_column_name="new name")
self.assertEqual(result.column_names(), ['id', 'new name'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['new name'], expected)
# default dtype is list
result = sf.pack_columns(column_prefix='category', dtype=list)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == True by default
expected = SArray([
{},
{'a':'2', 'b':2.0},
{'a':'3'},
{'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict)
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == False
expected = SArray([
{},
{'category.a':'2', 'category.b':2.0},
{'category.a':'3'},
{'category.b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, remove_prefix=False)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# fill_na
expected = SArray([
{'a':1, 'b':1},
{'a':'2', 'b':2.0},
{'a':'3', 'b':1},
{'a':1, 'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, fill_na = 1)
self.__assert_sarray_equal(result['category'], expected)
def test_pack_columns(self):
sf = SFrame()
sf['id'] = [1, 2, 3, 4, 5]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
expected_all_default = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
# pack all columns, all default values
self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)
expected_ab_default = SArray([
[1, None],
[2, '2'],
[3, '3'],
[4, None],
[5, '5']
])
expected_all_fillna_1 = SArray([
[1, -1, -1],
[2, '2', 2.0],
[3, '3', 3.0],
[4, -1, -1],
[5, '5', 5.0]
])
# pack all columns, filling missing values instead of dropping them
result = sf.pack_columns(fill_na=-1)
self.assertEqual(result.column_names(), ['X1'])
self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)
# pack a subset of columns with default settings
result = sf.pack_columns(['id','b'])
self.assertEqual(result.column_names(), ['c','X2'])
self.__assert_sarray_equal(result['c'], sf['c'])
self.__assert_sarray_equal(result['X2'], expected_ab_default)
expected_sarray_ac_fillna_default = SArray([
[1, float('NaN')],
[2, 2.0],
[3, 3.0],
[4, float('NaN')],
[5, 5.0]
])
result = sf.pack_columns(['id','c'], dtype=array.array)
self.assertEqual(result.column_names(), ['b', 'X2'])
self.__assert_sarray_equal(result['b'], sf['b'])
self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)
expected_dict_default = SArray([
{'id': 1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id':4 },
{'id':5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict)
self.__assert_sarray_equal(result['X1'], expected_dict_default)
expected_dict_fillna = SArray([
{'id': 1, 'b':-1, 'c': -1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id': 4, 'b':-1, 'c': -1},
{'id': 5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict, fill_na=-1)
self.__assert_sarray_equal(result['X1'], expected_dict_fillna)
# pack large number of rows
sf = SFrame()
num_rows = 100000
sf['a'] = range(0, num_rows);
sf['b'] = range(0, num_rows);
result = sf.pack_columns(['a', 'b']);
self.assertEqual(len(result), num_rows);
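# Summary of the pack_columns behaviour exercised above: dtype=list keeps mixed types,
# dtype=array.array requires numeric columns, and dtype=dict keys values by column name,
# dropping missing entries unless fill_na supplies a replacement.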
def test_pack_columns_dtype(self):
a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})
b = a.pack_columns(['name','data'],dtype=array.array)
expected = SArray([[-140500967, 3],[-1405039672,4]])
self.__assert_sarray_equal(b['X1'], expected)
def test_unpack_list(self):
sa = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
expected = SFrame()
expected ['a'] = [1, 2, 3, 4, 5]
expected ['b'] = [None, '2', '3', None, '5']
expected ['c'] = [None, 2.0, 3.0, None, 5.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(column_name_prefix='ttt');
self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# less column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = [1, 2, None, 4, 5]
e['b'] = [None, '2', '3', None, '5']
e['c'] = [None, 2.0, None, None, 5.0]
result = sa.unpack(na_value=3);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# column_name_prefix must be a string, not a list
with self.assertRaises(TypeError):
sa.unpack(column_name_prefix=['a','b'])
# wrong type
with self.assertRaises(RuntimeError):
sa.unpack(column_types = [str, int, float])
# wrong limit types
with self.assertRaises(TypeError):
sa.unpack(limit=["1"])
# int array cannot be unpacked
with self.assertRaises(TypeError):
SArray([1,2,3,4]).unpack()
# column name must be a string
with self.assertRaises(TypeError):
sa.unpack(1)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = int)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = [np.array])
# cannot infer type if no values
with self.assertRaises(RuntimeError):
SArray([], list).unpack()
def test_unpack_array(self):
sa = SArray([
[1, 1, 0],
[2, -1, 1],
[3, 3, 2],
[-1, 2, 3],
[5, 5, 4]
])
expected = SFrame()
expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]
expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]
expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# correct number of generated column names
result = sa.unpack(column_name_prefix = 'unpacked');
result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])))
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
expected['a'] = expected['a'].astype(int)
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype(float)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# less column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = gl.SArray([1, 2, 3, None, 5], float)
e['b'] = gl.SArray([1, None, 3, 2, 5], float)
e['c'] = gl.SArray([0, 1, 2, 3, 4], float)
result = sa.unpack(na_value=-1);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
def test_unpack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5,6,7]
sf["is_restaurant"] = [1, 1,0,0, 1, None, None]
sf["is_retail"] = [None,1,1,None,1, None, None]
sf["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
packed_sf = gl.SFrame()
packed_sf['user_id'] = sf['user_id']
packed_sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
with self.assertRaises(TypeError):
packed_sf['user_id'].unpack()
with self.assertRaises(TypeError):
packed_sf['category'].unpack(1)
with self.assertRaises(TypeError):
packed_sf['category'].unpack(value_types = [int])
# unpack only one column
expected_sf = gl.SFrame()
expected_sf["is_retail"] = sf["is_retail"]
unpacked_sf = packed_sf['category'].unpack(limit=["is_retail"], column_types=[int], column_name_prefix=None)
assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())
# unpack all
unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=["is_restaurant", "is_retail", "is_electronics"])
assert_frame_equal(unpacked_sf.to_dataframe(), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe())
# auto-infer types; the column order may differ, so sort columns before comparing
unpacked_sf = packed_sf["category"].unpack()
unpacked_sf.rename({
"X.is_restaurant": "is_restaurant",
"X.is_retail": "is_retail",
"X.is_electronics": "is_electronics"
})
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe().sort(axis=1))
unpacked_sf = packed_sf["category"].unpack(na_value = 0, column_name_prefix="new")
expected = SFrame()
expected["new.is_restaurant"] = [1, 1,None,None, 1, None, None]
expected["new.is_retail"] = [None,1,1,None,1, None, None]
expected["new.is_electronics"] = ["yes", "no","yes",None,"no", None, None]
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), expected.to_dataframe().sort(axis=1))
# unpack a dictionary that uses integers as keys
from graphlab import SArray
sa = SArray([
{1: 'a'},
{2: 'b'}
])
result = sa.unpack()
expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2])
expected = SFrame({'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2], column_name_prefix="expanded")
expected = SFrame({'expanded.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sa = gl.SArray([{i:i} for i in range(500)])
unpacked_sa = sa.unpack()
self.assertEqual(len(unpacked_sa), len(sa))
i = 0
for v in unpacked_sa:
for j in range(500):
val = v['X.' + str(j)]
if (j == i):
self.assertEqual(val, i);
else:
self.assertEqual(val, None);
i = i + 1
# if types don't agree, convert to string automatically
sa = gl.SArray([{'a':1},{'a': 'a_3'}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [str])
sa = gl.SArray([{'a':None}, {'a': 1}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
sa = gl.SArray([{'a':1}, {'a': None}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
# type inference is done server-side even when limit is given
sa = gl.SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])
unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix="")
for i in range(10):
v = unpacked[i]
for j in range(10):
if (j != i):
self.assertEqual(v['c'+str(j)], None)
elif j % 2 == 0:
self.assertEqual(v['c'+str(j)], j)
else:
self.assertEqual(v['c'+str(j)], 'v' + str(j))
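# Illustrative sketch of unpack on a dict SArray, as exercised above:
#   packed_sf['category'].unpack(column_name_prefix='cat', limit=['is_retail'])
#   # expected to yield a single column named 'cat.is_retail'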
def test_unpack_sframe(self):
from graphlab import SFrame, SArray
import graphlab as gl
sf = gl.SFrame()
sf['user_id'] = range(7)
sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
sf['list'] = [
None,
range(1),
range(2),
range(3),
range(1),
range(2),
range(3),
]
with self.assertRaises(TypeError):
sf.unpack('user_id')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list'] = sf['list']
expected["is_restaurant"] = [1, 1,0,0, 1, None, None]
expected["is_retail"] = [None,1,1,None,1, None, None]
expected["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
result = sf.unpack('category')
result.rename({
'category.is_restaurant': 'is_restaurant',
'category.is_retail': 'is_retail',
'category.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="")
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="abc")
result.rename({
'abc.is_restaurant': 'is_restaurant',
'abc.is_retail': 'is_retail',
'abc.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", column_types=[str], limit=['is_restaurant'])
new_expected = expected[['user_id', 'list', 'is_restaurant']]
new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)
assert_frame_equal(new_expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", na_value = None)
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,2, None, None,2]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list', na_value= 2)
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,None, None, None,None]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
# automatically resolve conflicting column names
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [range(5) for i in range(100)]
sf['b.0'] = range(100)
sf['b.0.1'] = range(100)
result = sf.unpack('b')
self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]
sf['b.str1'] = range(100)
result = sf.unpack('b')
self.assertEqual(len(result.column_names()), 4)
def test_stack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["category"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
{},
None]
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3,4,5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(ValueError):
sf.stack('category', ['user_id', 'value'])
# normal case
stacked_sf = sf.stack('category', ['category', 'value'])
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
# set column types
stacked_sf = sf.stack('category')
self.assertTrue(stacked_sf.column_types()[2] == str)
self.assertTrue(stacked_sf.column_types()[3] == int)
# auto generate column names
stacked_sf = sf.stack('category')
new_column_names = stacked_sf.column_names()
self.assertTrue(len(new_column_names) == 4)
expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]})
df_expected = expected_sf.to_dataframe().sort(['user_id', new_column_names[2]]).reset_index(drop=True)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", new_column_names[2]]).reset_index(drop=True), df_expected)
# drop_na=False keeps rows with missing values
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3, 4, 5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
stacked_sf = sf.stack('category', ['category','value'], drop_na = False)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
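# Note: stack('category', ['category', 'value']) produces one row per key/value pair of
# the dict column; unstack(['category', 'value']) reverses this, as exercised in
# test_unstack_dict below (missing entries come back as empty dicts rather than None).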
def test_stack_list(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [['a', 'b'], ['c'], ['d'],['e', None], None]
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4,5]
expected_result['X1'] = ['a','b','c','d','e',None, None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4]
expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
sf = SFrame()
n = 1000000
sf['a'] = range(1,n)
sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]
result = sf.stack('b')
self.assertEqual(len(result), (n - 1) * 2)
def test_stack_vector(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]
expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]
expected_result[result.column_names()[1]] = gl.SArray([1,1,2,1,2,3,1,2,3,4], float)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
def test_unstack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["categories"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
None]
stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)
# normal unstack
unstacked_sf = stacked_sf.unstack(column=['category', 'value'], new_column_name = 'categories')
# these frames are *almost* equal except user4 will be {} instead of None
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing new column name
unstacked_sf = stacked_sf.unstack(['category', 'value'])
self.assertEqual(len(unstacked_sf.column_names()), 3)
unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'})
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing column names
with self.assertRaises(KeyError):
stacked_sf.unstack(['category','value1'])
# wrong input
with self.assertRaises(TypeError):
stacked_sf.unstack(['category'])
# duplicate new column name
with self.assertRaises(RuntimeError):
unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')
def test_unstack_list(self):
sf = SFrame()
sf['a'] = [1,2,3,4]
sf['b'] = [range(10), range(20), range(30), range(50)]
stacked_sf = sf.stack('b', new_column_name = 'new_b')
unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
with self.assertRaises(RuntimeError):
stacked_sf.unstack('new_b', new_column_name='a')
with self.assertRaises(TypeError):
stacked_sf.unstack(['new_b'])
with self.assertRaises(KeyError):
stacked_sf.unstack('non exist')
def test_content_identifier(self):
sf = SFrame({"a":[1,2,3,4],"b":["1","2","3","4"]})
a1 = sf['a'].__get_content_identifier__()
a2 = sf['a'].__get_content_identifier__()
self.assertEquals(a1, a2)
def test_random_access(self):
t1 = list(range(0,100000))
t2 = [str(i) for i in t1]
t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))];
s = gl.SFrame({'t1':t1,'t2':t2})
# simple slices
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))
self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))
self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))
self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))
self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))
# negative slices
self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))
self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))
self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))
self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912],t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10],t[-10])
# edge case oddities
self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))
self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))
self.assertEqual(len(s[-1:-2]), 0)
self.assertEqual(len(s[-1:-1000:2]), 0)
with self.assertRaises(IndexError):
s[len(s)]
def test_sort(self):
sf = SFrame()
nrows = 100
sf['a'] = range(1, nrows)
sf['b'] = [float(i) for i in range(1,nrows)]
sf['c'] = [str(i) for i in range(1,nrows)]
sf['d'] = [[i, i+1] for i in range(1,nrows)]
reversed_sf = SFrame()
reversed_sf['a'] = range(nrows-1, 0, -1)
reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]
reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]
reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]
with self.assertRaises(TypeError):
sf.sort()
with self.assertRaises(TypeError):
sf.sort(1)
with self.assertRaises(TypeError):
sf.sort("d")
with self.assertRaises(ValueError):
sf.sort("nonexist")
with self.assertRaises(TypeError):
sf.sort({'a':True})
result = sf.sort('a')
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort('a', ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# sort two columns
result = sf.sort(['a', 'b'])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort(['a', 'c'], ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', True), ('b', False)])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', False), ('b', True)])
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
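# Illustrative sketch of the sort forms exercised above: a column name, a list of names,
# or (name, ascending) tuples for mixed directions:
#   sf.sort([('a', True), ('b', False)])   # ascending on 'a', descending on 'b'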
def test_dropna(self):
# empty case
sf = SFrame()
self.assertEquals(len(sf.dropna()), 0)
# normal case
self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())
test_split = self.employees_sf.dropna_split()
self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())
self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())
# create some other test sframe
test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),
'floats':SArray([np.nan,2.,3.,4.,np.nan],float),
'strs':SArray(['1',np.nan,'','4',None], str),
'lists':SArray([[1],None,[],[1,1,1,1],None], list),
'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})
# another normal, but more interesting case
self.__test_equal(test_sf.dropna(),
pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))
test_split = test_sf.dropna_split()
self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())
# the 'all' case
self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(how='all')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# select some columns
self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())
test_split = test_sf.dropna_split(['ints','floats'], how='all')
self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:1].append(test_sf[4:5]).to_dataframe())
self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split('strs')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(['strs','dicts'])
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
        # bad stuff: each call gets its own assertRaises so every case is
        # actually exercised
        with self.assertRaises(TypeError):
            test_sf.dropna(1)
        with self.assertRaises(TypeError):
            test_sf.dropna([1,2])
        with self.assertRaises(TypeError):
            test_sf.dropna('strs', how=1)
        with self.assertRaises(TypeError):
            test_sf.dropna_split(1)
        with self.assertRaises(TypeError):
            test_sf.dropna_split([1,2])
        with self.assertRaises(TypeError):
            test_sf.dropna_split('strs', how=1)
        with self.assertRaises(ValueError):
            test_sf.dropna('ints', how='blah')
        with self.assertRaises(ValueError):
            test_sf.dropna_split('ints', how='blah')
        with self.assertRaises(RuntimeError):
            test_sf.dropna('dontexist')
        with self.assertRaises(RuntimeError):
            test_sf.dropna_split('dontexist')
def test_add_row_number(self):
sf = SFrame(self.__create_test_df(400000))
sf = sf.add_row_number('id')
self.assertEquals(list(sf['id']), range(0,400000))
del sf['id']
sf = sf.add_row_number('id', -20000)
self.assertEquals(list(sf['id']), range(-20000,380000))
del sf['id']
sf = sf.add_row_number('id', 40000)
self.assertEquals(list(sf['id']), range(40000,440000))
with self.assertRaises(RuntimeError):
sf.add_row_number('id')
        with self.assertRaises(TypeError):
            sf = sf.add_row_number(46)
        with self.assertRaises(TypeError):
            sf = sf.add_row_number('id2',start='hi')
def test_check_lazy_sframe_size(self):
# empty sframe, materialized, has_size
sf = SFrame()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
        # add one column; materialized, has_size
sf['a'] = range(1000)
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# materialize it, materialized, has_size
sf['a'] = range(1000)
sf.__materialize__()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# logical filter, not materialized, not has_size
sf = sf[sf['a'] > 5000]
self.assertFalse(sf.__is_materialized__())
self.assertFalse(sf.__has_size__())
def test_sframe_to_rdd(self):
if not HAS_PYSPARK:
print "Did not run Pyspark unit tests!"
return
sc = SparkContext('local')
# Easiest case: single column of integers
test_rdd = sc.parallelize(range(100))
sf = SFrame.from_rdd(test_rdd)
self.assertTrue(sf.num_cols(), 1)
self.assertTrue(sf.column_names(), ['X1'])
# We cast integers to floats to be safe on varying types
self.assertEquals([float(i) for i in range(0,100)], list(sf['X1']))
sc.stop()
def test_rdd_to_sframe(self):
if not HAS_PYSPARK:
print "Did not run Pyspark unit tests!"
return
sc = SparkContext('local')
# Easiest case: single column of integers
sf = SFrame({'column_name':range(100)})
test_rdd = sf.to_rdd(sc)
res = test_rdd.collect()
self.assertEquals(res, [{'column_name':long(i)} for i in range(100)])
sc.stop()
def test_column_manipulation_of_lazy_sframe(self):
import graphlab as gl
g=gl.SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
del g['id']
# if lazy column deletion is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
g=gl.SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
g.swap_columns('a','id')
# if lazy column swap is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
def test_empty_sarray(self):
with util.TempDirectory() as f:
sf = SArray()
sf.save(f)
sf2 = SArray(f)
self.assertEquals(len(sf2), 0)
def test_empty_sframe(self):
with util.TempDirectory() as f:
sf = SFrame()
sf.save(f)
sf2 = SFrame(f)
self.assertEquals(len(sf2), 0)
self.assertEquals(sf2.num_columns(), 0)
def test_none_column(self):
sf = SFrame({'a':[1,2,3,4,5]})
sf['b'] = None
self.assertEqual(sf['b'].dtype(), float)
df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})
self.__test_equal(sf, df)
sa = SArray.from_const(None, 100)
self.assertEquals(list(sa), [None] * 100)
self.assertEqual(sa.dtype(), float)
def test_apply_with_partial(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
def concat_fn(character, row):
return '%s%d' % (character, row['a'])
my_partial_fn = functools.partial(concat_fn, 'x')
sa = sf.apply(my_partial_fn)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, row):
return '%s%d' % (self.character, row['a'])
concatenator = Concatenator('x')
sa = sf.apply(concatenator)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
if __name__ == "__main__":
import sys
# Check if we are supposed to connect to another server
for i, v in enumerate(sys.argv):
if v.startswith("ipc://"):
gl._launch(v)
# The rest of the arguments need to get passed through to
# the unittest module
del sys.argv[i]
break
unittest.main()
| agpl-3.0 |
BigTone2009/sms-tools | lectures/06-Harmonic-model/plots-code/sines-partials-harmonics-phase.py | 22 | 1986 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/sine-440-490.wav')
w = np.hamming(3529)
N = 32768
hN = N/2
t = -20
pin = 4850
x1 = x[pin:pin+w.size]
mX1, pX1 = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX1, t)
pmag = mX1[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX1, pX1, ploc)
plt.figure(1, figsize=(9, 6))
plt.subplot(311)
plt.plot(fs*np.arange(pX1.size)/float(N), pX1, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([200, 1000, -2, 8])
plt.title('pX + peaks (sine-440-490.wav)')
(fs, x) = UF.wavread('../../../sounds/vibraphone-C6.wav')
w = np.blackman(401)
N = 1024
hN = N/2
t = -80
pin = 200
x2 = x[pin:pin+w.size]
mX2, pX2 = DFT.dftAnal(x2, w, N)
ploc = UF.peakDetection(mX2, t)
pmag = mX2[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX2, pX2, ploc)
plt.subplot(3,1,2)
plt.plot(fs*np.arange(pX2.size)/float(N), pX2, 'c', lw=1.5)
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([500,10000,min(pX2), 25])
plt.title('pX + peaks (vibraphone-C6.wav)')
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 2048
hN = N/2
t = -80
pin = 10000
x3 = x[pin:pin+w.size]
mX3, pX3 = DFT.dftAnal(x3, w, N)
ploc = UF.peakDetection(mX3, t)
pmag = mX3[ploc]
iploc, ipmag, ipphase = UF.peakInterp(mX3, pX3, ploc)
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX3.size)/float(N), pX3, 'c', lw=1.5)
plt.plot(fs * iploc / N, ipphase, marker='x', color='b', alpha=1, linestyle='', markeredgewidth=1.5)
plt.axis([0,6000,2, 24])
plt.title('pX + peaks (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('sines-partials-harmonics-phase.png')
plt.show()
| agpl-3.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/utils/tests/test_murmurhash.py | 79 | 2849 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        previous_hashes.add(h)
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float64)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
pvlib/pvlib-python | pvlib/inverter.py | 3 | 18552 | """
This module contains functions for inverter modeling and for fitting inverter
models to data.
Inverter models calculate AC power output from DC input. Model parameters
should be passed as a single dict.
Functions for estimating parameters for inverter models should follow the
naming pattern 'fit_<model name>', e.g., fit_sandia.
"""
import numpy as np
import pandas as pd
from numpy.polynomial.polynomial import polyfit # different than np.polyfit
def _sandia_eff(v_dc, p_dc, inverter):
r'''
Calculate the inverter AC power without clipping
'''
Paco = inverter['Paco']
Pdco = inverter['Pdco']
Vdco = inverter['Vdco']
C0 = inverter['C0']
C1 = inverter['C1']
C2 = inverter['C2']
C3 = inverter['C3']
Pso = inverter['Pso']
A = Pdco * (1 + C1 * (v_dc - Vdco))
B = Pso * (1 + C2 * (v_dc - Vdco))
C = C0 * (1 + C3 * (v_dc - Vdco))
return (Paco / (A - B) - C * (A - B)) * (p_dc - B) + C * (p_dc - B)**2
def _sandia_limits(power_ac, p_dc, Paco, Pnt, Pso):
r'''
Applies minimum and maximum power limits to `power_ac`
'''
power_ac = np.minimum(Paco, power_ac)
min_ac_power = -1.0 * abs(Pnt)
below_limit = p_dc < Pso
try:
power_ac[below_limit] = min_ac_power
except TypeError: # power_ac is a float
if below_limit:
power_ac = min_ac_power
return power_ac
def sandia(v_dc, p_dc, inverter):
r'''
Convert DC power and voltage to AC power using Sandia's
Grid-Connected PV Inverter model.
Parameters
----------
v_dc : numeric
DC voltage input to the inverter. [V]
p_dc : numeric
DC power input to the inverter. [W]
inverter : dict-like
Defines parameters for the inverter model in [1]_.
Returns
-------
power_ac : numeric
AC power output. [W]
Notes
-----
Determines the AC power output of an inverter given the DC voltage and DC
power. Output AC power is bounded above by the parameter ``Paco``, to
represent inverter "clipping". When `power_ac` would be less than
parameter ``Pso`` (startup power required), then `power_ac` is set to
``-Pnt``, representing self-consumption. `power_ac` is not adjusted for
maximum power point tracking (MPPT) voltage windows or maximum current
limits of the inverter.
Required model parameters are:
====== ============================================================
Column Description
====== ============================================================
Paco AC power rating of the inverter. [W]
Pdco DC power input that results in Paco output at reference
voltage Vdco. [W]
Vdco DC voltage at which the AC power rating is achieved
with Pdco power input. [V]
Pso DC power required to start the inversion process, or
self-consumption by inverter, strongly influences inverter
efficiency at low power levels. [W]
C0 Parameter defining the curvature (parabolic) of the
relationship between AC power and DC power at the reference
operating condition. [1/W]
C1 Empirical coefficient allowing ``Pdco`` to vary linearly
with DC voltage input. [1/V]
C2 Empirical coefficient allowing ``Pso`` to vary linearly with
DC voltage input. [1/V]
C3 Empirical coefficient allowing ``C0`` to vary linearly with
DC voltage input. [1/V]
Pnt AC power consumed by the inverter at night (night tare). [W]
====== ============================================================
A copy of the parameter database from the System Advisor Model (SAM) [2]_
is provided with pvlib and may be read using
:py:func:`pvlib.pvsystem.retrieve_sam`.
References
----------
.. [1] D. King, S. Gonzalez, G. Galbraith, W. Boyson, "Performance Model
for Grid-Connected Photovoltaic Inverters", SAND2007-5036, Sandia
National Laboratories.
.. [2] System Advisor Model web page. https://sam.nrel.gov.
See also
--------
pvlib.pvsystem.retrieve_sam
'''
Paco = inverter['Paco']
Pnt = inverter['Pnt']
Pso = inverter['Pso']
power_ac = _sandia_eff(v_dc, p_dc, inverter)
power_ac = _sandia_limits(power_ac, p_dc, Paco, Pnt, Pso)
if isinstance(p_dc, pd.Series):
power_ac = pd.Series(power_ac, index=p_dc.index)
return power_ac
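# Illustrative sketch (not part of pvlib): calling `sandia` with a made-up
# parameter dict. The numbers below are hypothetical and only show the
# expected keys; real coefficients come from the SAM inverter database read
# with pvlib.pvsystem.retrieve_sam.
#
#     example_inv = {'Paco': 250., 'Pdco': 259.6, 'Vdco': 40., 'Pso': 2.1,
#                    'C0': -4.1e-5, 'C1': -9.1e-5, 'C2': 5.0e-4,
#                    'C3': -1.8e-3, 'Pnt': 0.02}
#     p_ac = sandia(v_dc=38., p_dc=200., inverter=example_inv)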
def sandia_multi(v_dc, p_dc, inverter):
r'''
Convert DC power and voltage to AC power for an inverter with multiple
MPPT inputs.
Uses Sandia's Grid-Connected PV Inverter model [1]_.
Parameters
----------
v_dc : tuple, list or array of numeric
DC voltage on each MPPT input of the inverter. If type is array, must
be 2d with axis 0 being the MPPT inputs. [V]
p_dc : tuple, list or array of numeric
DC power on each MPPT input of the inverter. If type is array, must
be 2d with axis 0 being the MPPT inputs. [W]
inverter : dict-like
Defines parameters for the inverter model in [1]_.
Returns
-------
power_ac : numeric
AC power output for the inverter. [W]
Raises
------
ValueError
If v_dc and p_dc have different lengths.
Notes
-----
See :py:func:`pvlib.inverter.sandia` for definition of the parameters in
`inverter`.
References
----------
.. [1] D. King, S. Gonzalez, G. Galbraith, W. Boyson, "Performance Model
for Grid-Connected Photovoltaic Inverters", SAND2007-5036, Sandia
National Laboratories.
See also
--------
pvlib.inverter.sandia
'''
if len(p_dc) != len(v_dc):
raise ValueError('p_dc and v_dc have different lengths')
power_dc = sum(p_dc)
power_ac = 0. * power_dc
for vdc, pdc in zip(v_dc, p_dc):
power_ac += pdc / power_dc * _sandia_eff(vdc, power_dc, inverter)
return _sandia_limits(power_ac, power_dc, inverter['Paco'],
inverter['Pnt'], inverter['Pso'])
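# Illustrative sketch (not part of pvlib): `sandia_multi` expects one DC
# voltage and one DC power entry per MPPT input, using the same kind of
# parameter dict shown above for `sandia`, e.g. for a hypothetical
# two-input inverter:
#
#     p_ac = sandia_multi(v_dc=(38., 42.), p_dc=(150., 120.),
#                         inverter=example_inv)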
def adr(v_dc, p_dc, inverter, vtol=0.10):
r'''
Converts DC power and voltage to AC power using Anton Driesse's
grid-connected inverter efficiency model.
Parameters
----------
v_dc : numeric
DC voltage input to the inverter, should be >= 0. [V]
p_dc : numeric
DC power input to the inverter, should be >= 0. [W]
inverter : dict-like
Defines parameters for the inverter model in [1]_. See Notes for
required model parameters. A parameter database is provided with pvlib
and may be read using :py:func:`pvlib.pvsystem.retrieve_sam`.
vtol : numeric, default 0.1
Fraction of DC voltage that determines how far the efficiency model is
extrapolated beyond the inverter's normal input voltage operating
range. 0.0 <= vtol <= 1.0. [unitless]
Returns
-------
power_ac : numeric
AC power output. [W]
Notes
-----
Determines the AC power output of an inverter given the DC voltage and DC
power. Output AC power is bounded above by the parameter ``Pacmax``, to
represent inverter "clipping". AC power is bounded below by ``-Pnt``
(negative when power is consumed rather than produced) which represents
self-consumption. `power_ac` is not adjusted for maximum power point
tracking (MPPT) voltage windows or maximum current limits of the inverter.
Required model parameters are:
================ ==========================================================
Column Description
================ ==========================================================
Pnom Nominal DC power, typically the DC power needed to produce
maximum AC power output. [W]
Vnom Nominal DC input voltage. Typically the level at which the
highest efficiency is achieved. [V]
Vmax Maximum DC input voltage. [V]
Vmin Minimum DC input voltage. [V]
Vdcmax Maximum voltage supplied from DC array. [V]
MPPTHi Maximum DC voltage for MPPT range. [V]
MPPTLow Minimum DC voltage for MPPT range. [V]
Pacmax Maximum AC output power, used to clip the output power
if needed. [W]
ADRCoefficients A list of 9 coefficients that capture the influence
of input voltage and power on inverter losses, and thereby
efficiency. Corresponds to terms from [1]_ (in order):
:math: `b_{0,0}, b_{1,0}, b_{2,0}, b_{0,1}, b_{1,1},
b_{2,1}, b_{0,2}, b_{1,2}, b_{2,2}`. See [1]_ for the
use of each coefficient and its associated unit.
Pnt AC power consumed by inverter at night (night tare) to
maintain circuitry required to sense the PV array
voltage. [W]
================ ==========================================================
AC power output is set to NaN where the input DC voltage exceeds a limit
M = max(Vmax, Vdcmax, MPPTHi) x (1 + vtol), and where the input DC voltage
is less than a limit m = max(Vmin, MPPTLow) x (1 - vtol)
References
----------
.. [1] A. Driesse, "Beyond the Curves: Modeling the Electrical Efficiency
of Photovoltaic Inverters", 33rd IEEE Photovoltaic Specialist
Conference (PVSC), June 2008
See also
--------
pvlib.inverter.sandia
pvlib.pvsystem.retrieve_sam
'''
p_nom = inverter['Pnom']
v_nom = inverter['Vnom']
pac_max = inverter['Pacmax']
p_nt = inverter['Pnt']
ce_list = inverter['ADRCoefficients']
v_max = inverter['Vmax']
v_min = inverter['Vmin']
vdc_max = inverter['Vdcmax']
mppt_hi = inverter['MPPTHi']
mppt_low = inverter['MPPTLow']
v_lim_upper = float(np.nanmax([v_max, vdc_max, mppt_hi]) * (1 + vtol))
v_lim_lower = float(np.nanmax([v_min, mppt_low]) * (1 - vtol))
pdc = p_dc / p_nom
vdc = v_dc / v_nom
# zero voltage will lead to division by zero, but since power is
# set to night time value later, these errors can be safely ignored
with np.errstate(invalid='ignore', divide='ignore'):
poly = np.array([pdc**0, # replace with np.ones_like?
pdc,
pdc**2,
vdc - 1,
pdc * (vdc - 1),
pdc**2 * (vdc - 1),
1. / vdc - 1, # divide by 0
pdc * (1. / vdc - 1), # invalid 0./0. --> nan
pdc**2 * (1. / vdc - 1)]) # divide by 0
p_loss = np.dot(np.array(ce_list), poly)
power_ac = p_nom * (pdc - p_loss)
p_nt = -1 * np.absolute(p_nt)
# set output to nan where input is outside of limits
# errstate silences case where input is nan
with np.errstate(invalid='ignore'):
invalid = (v_lim_upper < v_dc) | (v_dc < v_lim_lower)
power_ac = np.where(invalid, np.nan, power_ac)
# set night values
power_ac = np.where(vdc == 0, p_nt, power_ac)
power_ac = np.maximum(power_ac, p_nt)
# set max ac output
power_ac = np.minimum(power_ac, pac_max)
if isinstance(p_dc, pd.Series):
power_ac = pd.Series(power_ac, index=pdc.index)
return power_ac
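# Illustrative sketch (not part of pvlib): the `adr` model needs the nine
# 'ADRCoefficients' plus the voltage/power limits listed in the docstring.
# The values below are placeholders, not a real inverter record; real
# records are in the parameter database read with
# pvlib.pvsystem.retrieve_sam.
#
#     example_adr = {'Pnom': 250., 'Vnom': 40., 'Vmax': 60., 'Vmin': 20.,
#                    'Vdcmax': 65., 'MPPTHi': 55., 'MPPTLow': 25.,
#                    'Pacmax': 240., 'Pnt': 0.05,
#                    'ADRCoefficients': [0.01, 0.02, 0.0, 0.0, 0.0,
#                                        0.0, 0.0, 0.0, 0.0]}
#     p_ac = adr(v_dc=38., p_dc=200., inverter=example_adr)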
def pvwatts(pdc, pdc0, eta_inv_nom=0.96, eta_inv_ref=0.9637):
r"""
NREL's PVWatts inverter model.
The PVWatts inverter model [1]_ calculates inverter efficiency :math:`\eta`
as a function of input DC power
.. math::
\eta = \frac{\eta_{nom}}{\eta_{ref}} (-0.0162\zeta - \frac{0.0059}
{\zeta} + 0.9858)
where :math:`\zeta=P_{dc}/P_{dc0}` and :math:`P_{dc0}=P_{ac0}/\eta_{nom}`.
Output AC power is then given by
.. math::
P_{ac} = \min(\eta P_{dc}, P_{ac0})
Parameters
----------
pdc : numeric
DC power. Same unit as ``pdc0``.
pdc0: numeric
DC input limit of the inverter. Same unit as ``pdc``.
eta_inv_nom: numeric, default 0.96
Nominal inverter efficiency. [unitless]
eta_inv_ref: numeric, default 0.9637
Reference inverter efficiency. PVWatts defines it to be 0.9637
and is included here for flexibility. [unitless]
Returns
-------
power_ac: numeric
AC power. Same unit as ``pdc0``.
Notes
-----
Note that ``pdc0`` is also used as a symbol in
:py:func:`pvlib.pvsystem.pvwatts_dc`. ``pdc0`` in this function refers to
the DC power input limit of the inverter. ``pdc0`` in
:py:func:`pvlib.pvsystem.pvwatts_dc` refers to the DC power of the modules
at reference conditions.
See Also
--------
pvlib.inverter.pvwatts_multi
References
----------
.. [1] A. P. Dobos, "PVWatts Version 5 Manual,"
http://pvwatts.nrel.gov/downloads/pvwattsv5.pdf (2014).
"""
pac0 = eta_inv_nom * pdc0
zeta = pdc / pdc0
# arrays to help avoid divide by 0 for scalar and array
eta = np.zeros_like(pdc, dtype=float)
pdc_neq_0 = ~np.equal(pdc, 0)
# eta < 0 if zeta < 0.006. power_ac is forced to be >= 0 below. GH 541
eta = eta_inv_nom / eta_inv_ref * (
-0.0162 * zeta - np.divide(0.0059, zeta, out=eta, where=pdc_neq_0)
+ 0.9858) # noQA: W503
power_ac = eta * pdc
power_ac = np.minimum(pac0, power_ac)
power_ac = np.maximum(0, power_ac) # GH 541
return power_ac
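# Worked example (for orientation only): with pdc=1000 and pdc0=2500,
# zeta = 0.4, so eta ~= 0.96/0.9637 * (-0.0162*0.4 - 0.0059/0.4 + 0.9858)
# ~= 0.961, giving power_ac ~= 961 (same unit as pdc0), well below the
# clipping level pac0 = 0.96 * 2500 = 2400.
#
#     p_ac = pvwatts(pdc=1000., pdc0=2500.)   # ~961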
def pvwatts_multi(pdc, pdc0, eta_inv_nom=0.96, eta_inv_ref=0.9637):
r"""
Extend NREL's PVWatts inverter model for multiple MPP inputs.
DC input power is summed over MPP inputs to obtain the DC power
input to the PVWatts inverter model. See :py:func:`pvlib.inverter.pvwatts`
for details.
Parameters
----------
pdc : tuple, list or array of numeric
DC power on each MPPT input of the inverter. If type is array, must
be 2d with axis 0 being the MPPT inputs. Same unit as ``pdc0``.
pdc0: numeric
Total DC power limit of the inverter. Same unit as ``pdc``.
eta_inv_nom: numeric, default 0.96
Nominal inverter efficiency. [unitless]
eta_inv_ref: numeric, default 0.9637
Reference inverter efficiency. PVWatts defines it to be 0.9637
and is included here for flexibility. [unitless]
Returns
-------
power_ac: numeric
AC power. Same unit as ``pdc0``.
See Also
--------
pvlib.inverter.pvwatts
"""
return pvwatts(sum(pdc), pdc0, eta_inv_nom, eta_inv_ref)
def fit_sandia(ac_power, dc_power, dc_voltage, dc_voltage_level, p_ac_0, p_nt):
r'''
Determine parameters for the Sandia inverter model.
Parameters
----------
ac_power : array_like
AC power output at each data point [W].
dc_power : array_like
DC power input at each data point [W].
dc_voltage : array_like
DC input voltage at each data point [V].
dc_voltage_level : array_like
DC input voltage level at each data point. Values must be 'Vmin',
'Vnom' or 'Vmax'.
p_ac_0 : float
Rated AC power of the inverter [W].
p_nt : float
Night tare, i.e., power consumed while inverter is not delivering
AC power. [W]
Returns
-------
dict
A set of parameters for the Sandia inverter model [1]_. See
:py:func:`pvlib.inverter.sandia` for a description of keys and values.
See Also
--------
pvlib.inverter.sandia
Notes
-----
The fitting procedure to estimate parameters is described at [2]_.
A data point is a pair of values (dc_power, ac_power). Typically, inverter
performance is measured or described at three DC input voltage levels,
denoted 'Vmin', 'Vnom' and 'Vmax' and at each level, inverter efficiency
is determined at various output power levels. For example,
the CEC inverter test protocol [3]_ specifies measurement of input DC
power that delivers AC output power of 0.1, 0.2, 0.3, 0.5, 0.75 and 1.0 of
the inverter's AC power rating.
References
----------
.. [1] D. King, S. Gonzalez, G. Galbraith, W. Boyson, "Performance Model
for Grid-Connected Photovoltaic Inverters", SAND2007-5036, Sandia
National Laboratories.
.. [2] Sandia Inverter Model page, PV Performance Modeling Collaborative
https://pvpmc.sandia.gov/modeling-steps/dc-to-ac-conversion/sandia-inverter-model/
.. [3] W. Bower, et al., "Performance Test Protocol for Evaluating
Inverters Used in Grid-Connected Photovoltaic Systems", available at
https://www.energy.ca.gov/sites/default/files/2020-06/2004-11-22_Sandia_Test_Protocol_ada.pdf
''' # noqa: E501
voltage_levels = ['Vmin', 'Vnom', 'Vmax']
# average dc input voltage at each voltage level
v_d = np.array(
[dc_voltage[dc_voltage_level == 'Vmin'].mean(),
dc_voltage[dc_voltage_level == 'Vnom'].mean(),
dc_voltage[dc_voltage_level == 'Vmax'].mean()])
v_nom = v_d[1] # model parameter
# independent variable for regressions, x_d
x_d = v_d - v_nom
# empty dataframe to contain intermediate variables
coeffs = pd.DataFrame(index=voltage_levels,
columns=['a', 'b', 'c', 'p_dc', 'p_s0'], data=np.nan)
def solve_quad(a, b, c):
return (-b + (b**2 - 4 * a * c)**.5) / (2 * a)
# [2] STEP 3E, fit a line to (DC voltage, model_coefficient)
def extract_c(x_d, add):
beta0, beta1 = polyfit(x_d, add, 1)
c = beta1 / beta0
return beta0, beta1, c
for d in voltage_levels:
x = dc_power[dc_voltage_level == d]
y = ac_power[dc_voltage_level == d]
# [2] STEP 3B
# fit a quadratic to (DC power, AC power)
c, b, a = polyfit(x, y, 2)
# [2] STEP 3D, solve for p_dc and p_s0
p_dc = solve_quad(a, b, (c - p_ac_0))
p_s0 = solve_quad(a, b, c)
# Add values to dataframe at index d
coeffs['a'][d] = a
coeffs['p_dc'][d] = p_dc
coeffs['p_s0'][d] = p_s0
b_dc0, b_dc1, c1 = extract_c(x_d, coeffs['p_dc'])
b_s0, b_s1, c2 = extract_c(x_d, coeffs['p_s0'])
b_c0, b_c1, c3 = extract_c(x_d, coeffs['a'])
p_dc0 = b_dc0
p_s0 = b_s0
c0 = b_c0
# prepare dict and return
return {'Paco': p_ac_0, 'Pdco': p_dc0, 'Vdco': v_nom, 'Pso': p_s0,
'C0': c0, 'C1': c1, 'C2': c2, 'C3': c3, 'Pnt': p_nt}
| bsd-3-clause |
slivingston/BatStack | src/analysis/arrsim.py | 1 | 16278 | #!/usr/bin/env python
"""
Simulate microphone array measurement; models BatStack Array.
NOTES:
Note that we wrap white noise outside valid range back into the signal!
This procedure is biased toward lower values; explicitly,
an absolute value operation and modulo are applied.
The abs is natural, but mod causes large signals to appear again
near zero. This is in significant contrast to clipping.
I tend to interchange freely between indexing channels via keys
in a dictionary (as stored internally in BSArrayFile object)
and simply counting from 1 to total number of microphone channels.
This must be cleaned up later to only use dictionary keys, which
is more general.
N.B., source level is not a fixed, physical value (e.g. it is not specified in
units of dB SPL). That is, everything should be accurate with respect to the
simulation, and the resulting signal levels differ from physical quantities by a
uniform scaling factor (hence, beamshape, etc. should be unaffected). I
consider this a weakness and plan to implement accurate source level
representation.
N.B., lacks (frequency-dependent) atmospheric attenuation! I plan to implement
values recorded in (Lawrence & Simmons, 1982).
N.B., we assume end-trigger (i.e. time at trigger is 0).
At time of writing, this only affects timestamps in the sim d3 file
(which is only generated if requested, anyway).
Scott Livingston <slivingston@caltech.edu>
Feb-Mar 2011.
"""
import sys
import types
from optparse import OptionParser # Deprecated since Python v2.7, but for folks using 2.5 or 2.6...
import numpy as np
import numpy.linalg as la
import scipy.special as sp_special
import scipy.io as sio
import matplotlib.pyplot as plt
import batstack
# Convenience renaming:
eps = np.finfo(np.float).eps
def piston( f, # frequency
a_h, a_v,
theta, phi,
c=343 ): # speed of sound, in m/s.
"""Rigid elliptical piston in an infinite baffle,
at frequency f Hz,
with horizontal radius a_h (in meters) and vertical radius a_v.
    Follows the description in the book by Beranek, Leo L. (1954). Acoustics.
(yes, there is a more recent edition, but I don't have a copy...)
Theta and phi, which are azimuth and elevation, resp., have units of
radians. P is sound pressure in units of N/m^2 (? this should be
verified). If theta and phi are scalars, or one is a vector, then
behavior is as you would expect: you get a scalar or vector back.
If both theta and phi are vectors, then P is a matrix where where
columns correspond to values of theta, and rows correspond to values
of phi.
NOTES: - It is possible I have made a mistake in the below
computations. The returned values from besselj are complex
with, in some places, small but nonzero imaginary
components. I address this by taking absolute value of P
(i.e. complex magnitude); this matches intuition but awaits
confirmation till I learn more acoustics theory
"""
k = 2*np.pi*f/c # wave number
if type(theta) is types.IntType or type(theta) is types.FloatType or type(theta) is np.float64:
theta = np.array([theta], dtype=np.float64)
else:
theta = np.array(theta, dtype=np.float64)
if type(phi) is types.IntType or type(phi) is types.FloatType or type(phi) is np.float64:
phi = np.array([phi], dtype=np.float64)
else:
phi = np.array(phi, dtype=np.float64)
h_term = k*a_h*np.sin(theta)
v_term = k*a_v*np.sin(phi)
h_factor = .5*np.ones(shape=h_term.shape)
for k in range(len(h_factor)):
if np.abs(h_term[k]) > 4*np.finfo(np.float64).eps:
h_factor[k] = sp_special.jn(1, h_term[k])/h_term[k]
v_factor = .5*np.ones(shape=v_term.shape)
for k in range(len(v_factor)):
if np.abs(v_term[k]) > 4*np.finfo(np.float64).eps:
v_factor[k] = sp_special.jn(1, v_term[k])/v_term[k]
if v_factor.shape[0] > 1 and h_factor.shape[0] > 1:
return 4*np.outer(np.abs(v_factor), np.abs(h_factor)) # make P from outer product.
else:
return 4*np.abs(v_factor*h_factor)
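# Quick on-axis check (orientation only): at theta = phi = 0 both directivity
# factors reduce to 1/2, so piston() returns 4 * 0.5 * 0.5 = 1.0 regardless
# of frequency or piston radii:
#
#     piston(35e3, .0163, .0163, 0, 0)   # -> array([ 1.])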
def get_front_center_pose(mike_pos, # Microphone position matrix, as elsewhere.
center_dist=1., # meters from array center
origin_in_front=True):
"""compute ``front-and-center'' (or ``ideal'') pose w.r.t. microphones.
Note that I assume origin is on the side of the array that is
``in front''. This can be flipping by setting origin_in_front
argument to False.
Returns result as a 5-element array, specifying the source pose.
(compatible with get_dir function, etc.)
"""
mike_center = np.array([np.mean((mike_pos.T)[0]), np.mean((mike_pos.T)[1]),
np.mean((mike_pos.T)[2]), 0, 0])
dir_mat = get_dir(mike_center, mike_pos)
mike_center = mike_center[:3]
far_index = np.argmax(dir_mat, axis=0)[0]
if far_index > 1:
far2_index = np.argmax((dir_mat.T)[0][:far_index])
else:
far2_index = np.argmax((dir_mat.T)[0][(far_index+1):]) + far_index+1
proj_vect = np.cross(mike_pos[far_index]-mike_center,
mike_pos[far2_index]-mike_center)
proj_vect = proj_vect*center_dist/la.norm(proj_vect) # Scale to desired distance
# Set appropriate orientation
if np.dot(proj_vect, mike_center) > 0 or not origin_in_front:
proj_vect *= -1.
theta = np.arctan2(-proj_vect[1], -proj_vect[0]) # Find angular direction
phi = np.arctan2(-proj_vect[2], np.sqrt(proj_vect[0]**2 + proj_vect[1]**2))
proj_vect += mike_center # Translate to global coordinates
return np.array([proj_vect[0], proj_vect[1], proj_vect[2], theta, phi])
def test_piston():
"""use case for piston function.
...can be used by simply calling from main program entry-point.
test_piston takes frequency from argv list.
"""
theta = np.linspace(-np.pi/2, np.pi/2, 1000)
#phi = np.linspace(-np.pi/2, np.pi/2, 30)
try:
if len(sys.argv) < 2:
raise ValueError # a little sloppy to do it this way
freq = float(sys.argv[1])
except ValueError:
print "Usage: %s f" % sys.argv[0]
exit(1)
P = piston(freq, .0163, .0163, theta, 0)
print P.shape
plt.semilogy(theta, P)
plt.grid()
plt.xlabel("angle in radians")
plt.title(str(freq/1000)+" kHz")
plt.show()
def get_dir(src_pos, mike_pos):
"""Determine r, theta, phi values from source to microphones.
src_pos should follow format as described elsewhere,
i.e. an array of x,y,z,t,p, where
x,y,z are rectangular coordinates and
t,p are spherical-like coordinates (think ``theta'' and
``phi'' for azimuth and elevation with respect to the
x-axis; e.g., t,p = 0,0 indicates aimed parallel to
positive x-axis.
Result is returned as a N x 3 matrix, where N is the number
of microphones (i.e. number of rows in given mike_pos matrix).
"""
if len(mike_pos.shape) > 1:
num_mics = mike_pos.shape[0]
dir_mat = np.zeros(shape=(num_mics, 3))
for k in range(num_mics):
trans_vect = np.array([mike_pos[k][j]-src_pos[j] for j in range(3)])
dir_mat[k][0] = np.sqrt(np.sum([u**2 for u in trans_vect])) # Radius
dir_mat[k][1] = np.arctan2(trans_vect[1], trans_vect[0]) - src_pos[3]
dir_mat[k][2] = np.arctan2(trans_vect[2], np.sqrt(trans_vect[0]**2 + trans_vect[1]**2)) - src_pos[4]
else: # Handle special case of single microphone position
num_mics = 1
trans_vect = np.array([mike_pos[j]-src_pos[j] for j in range(3)])
dir_mat = np.array([0., 0, 0])
dir_mat[0] = np.sqrt(np.sum([u**2 for u in trans_vect])) # Radius
dir_mat[1] = np.arctan2(trans_vect[1], trans_vect[0]) - src_pos[3]
dir_mat[2] = np.arctan2(trans_vect[2], np.sqrt(trans_vect[0]**2 + trans_vect[1]**2)) - src_pos[4]
return dir_mat
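# Illustrative sketch: for a source at the origin aimed along +x and a single
# microphone 1 m away on the x-axis, get_dir returns radius 1 and zero
# azimuth/elevation offsets (values here are for orientation only):
#
#     src = np.array([0., 0., 0., 0., 0.])
#     mike = np.array([1., 0., 0.])
#     r, theta, phi = get_dir(src, mike)   # -> 1.0, 0.0, 0.0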
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-f", "--wamike", dest="pos_filename", default=None,
help="wamike.txt position file; cf. BatStack ref manual")
parser.add_option("-p", "--pose", dest="src_pose", default="0,0,0,0,0",
help="source position (default is 0,0,0,0,0); with respect to the x-axis, and of the form x,y,z,t,p")
parser.add_option("-t", dest="duration", type="float", default=.1,
help="duration of simulated recording.")
parser.add_option("-w", dest="wnoise", type="float", nargs=2, default=(512, 32),
help="mean and variance (in bits) of white background noise.")
parser.add_option("-c", dest="src_freq", type="float", default=35.,
help="piston source frequency (in kHz).")
parser.add_option("-b", "--bitwidth", dest="sample_width", metavar="BITWIDTH", type="int", default=10,
help="sample width of A/D converter; default is 10 bits.")
parser.add_option("-n", "--nonoise", action="store_true", dest="no_noise", default=False)
parser.add_option("-s", "--speed", type="float", dest="speed_sound", default=343.,
help="in m/s; default is 343.")
parser.add_option("-o", dest="output_filename", default="test.bin",
help="name of file to save results to; default is ``test.bin''")
parser.add_option("-g", "--d3out", action="store_true", dest="make_d3_traj", default=False,
help="create d3-conforming trajectory file corresponding to this simulation; file-name is based on that of sim Array data file.")
parser.add_option("--spherical-loss", action="store_true", dest="apply_spherical_loss", default=False,
help="apply pressure loss of r^(-1) factor, i.e. ``spherical spreading''; disabled by default.")
parser.add_option("--front-and-center", action="store_true", dest="src_front_center", default=False,
help="place source ``front-and-center'' with respect to and toward center of the microphone array; overrides any other source position configuration")
parser.add_option("--fac-dist", type="float", dest="fac_dist", default=1.,
help="distance from array for ``front-and-center'' source position; default is 1 m.")
parser.add_option("--source-level", type="float", dest="src_level", default=256.,
help="magnitude of source (not physically meaningful); default is 256.")
(options, args) = parser.parse_args()
if options.pos_filename is None:
print "A microphone position file must be provided.\n(See -h for usage note.)"
exit(1)
if options.sample_width < 0:
print "Error: sample width must be positive (given %d)." % options.sample_width
exit(1)
max_val = 2**(options.sample_width)-1
mike_pos = np.loadtxt(options.pos_filename)
if (len(mike_pos.shape) == 1 and mike_pos.shape[0] != 3) \
or mike_pos.shape[1] != 3: # check that mike position file is reasonable
print "Error: given file appears malformed: %s" % options.pos_filename
exit(1)
if options.src_front_center:
# Place source in ``ideal position'' with respect to microphones.
if options.fac_dist < .1:
print "Warning: distance from array for ``front-and-center'' source\nposition is less than 10 cm."
src_pos = get_front_center_pose(mike_pos, center_dist=options.fac_dist)
print "Source has fixed pose: (%.4f, %.4f, %.4f, %.4f, %.4f)" % tuple(src_pos)
else:
try:
src_pos = np.array([float(k) for k in options.src_pose.split(",")])
except ValueError:
print "Source position invalid; it should be comma-separated real numbers."
print """E.g., with an x,y,z position of (2, 3.4, -1) and directed
.3 radians CCW on the z-axis and -1.1 radians CCW on the y-axis,
you would enter 2,3.4,-1,.3,-1.1
"""
exit(1)
sim_bsaf = batstack.BSArrayFile()
if len(mike_pos.shape) == 1:
sim_bsaf.num_mics = 1 # Special case of only one microphone
else:
sim_bsaf.num_mics = mike_pos.shape[0]
sim_bsaf.trial_number = 1
sim_bsaf.notes = "Simulated data!"
num_samples = np.ceil(options.duration/sim_bsaf.sample_period)
if not options.no_noise:
for k in range(sim_bsaf.num_mics):
#sim_bsaf.data[k+1] = np.random.random_integers(low=0, high=1023, size=1000)
sim_bsaf.data[k+1] = np.mod(np.ceil(np.abs(np.random.randn(num_samples)
*options.wnoise[1]+options.wnoise[0])), max_val+1)
# Note that we wrap white noise outside valid range back into the signal!
# This procedure is biased toward lower values; explicitly,
# an absolute value operation and modulo are applied.
# The abs is natural, but mod causes large signals to appear again near zero.
# This is in significant contrast to clipping.
else: # Noiseless recording:
for k in range(sim_bsaf.num_mics):
sim_bsaf.data[k+1] = (max_val+1)/2.*np.ones(num_samples)
src_start_ind = int(num_samples/2)
src_duration = .01 # 10 ms
t = np.arange(0, src_duration, sim_bsaf.sample_period)
x = options.src_level*np.sin(2*np.pi*t*options.src_freq*1e3)
# N.B., we assume end-trigger (i.e. time at trigger is 0)
print "Starting emission at time %f s" % (sim_bsaf.sample_period*(src_start_ind-num_samples+1))
dir_mat = get_dir(src_pos, mike_pos)
P = dict()
max_P = -1
for k in sim_bsaf.data.keys():
P[k] = piston(options.src_freq*1e3, .016, .016,
dir_mat[k-1][1], # Theta
dir_mat[k-1][2]) # Phi
if max_P < P[k]:
max_P = P[k]
if max_P <= eps:
print "Warning: max scaling factor is less than machine epsilon."
for k in P.keys():
P[k] /= max_P # Normalize
for k in sim_bsaf.data.keys():
# Compute offset in indices due to time-of-flight from source to microphone
tof_offset = int(np.ceil((dir_mat[k-1][0]/options.speed_sound)/sim_bsaf.sample_period))
# Bound index range to fit channel recording length
lower_ind = tof_offset+src_start_ind
upper_ind = tof_offset+src_start_ind+len(x)
if lower_ind >= len(sim_bsaf.data[k]):
continue # This channel didn't receive signal in time
if upper_ind > len(sim_bsaf.data[k]):
upper_ind = len(sim_bsaf.data[k])
if options.apply_spherical_loss:
# Spherical spreading loss (just r^(-1) here);
# this is reduction in pressure; baseline distance is 10 cm.
sph_loss = .1/(dir_mat[k-1][0])
else:
            sph_loss = 1. # Hopefully this does not cause round-off errors to occur
                          # (as opposed to not multiplying by sph_loss=1., i.e. by
                          # using a conditional statement.)
chan_mean = np.mean(sim_bsaf.data[k])
sim_bsaf.data[k] -= chan_mean
sim_bsaf.data[k][lower_ind:upper_ind] += x[:(upper_ind-lower_ind)]*P[k]*sph_loss
sim_bsaf.data[k] += chan_mean
# Limit signal range
for k in sim_bsaf.data.keys():
sim_bsaf.data[k].clip(0, max_val, out=sim_bsaf.data[k])
if sim_bsaf.writefile(options.output_filename, prevent_overwrite=False) == False:
print "Error while saving simulation results."
# Generate spatial trajectory in (sim) d3 file, if requested
if options.make_d3_traj:
d3_fps = 250.
num_d3_samples = np.ceil(options.duration*d3_fps)
d3a = dict()
d3a["endframe"] = 0.
d3a["startframe"] = -num_d3_samples+1.
d3a["fvideo"] = 250. # 250 fps
d3a["object"] = dict()
d3a["object"]["name"] = "bat"
d3a["object"]["video"] = np.outer(np.ones(int(num_d3_samples)), src_pos[:3])
d3a_filename = options.output_filename[:options.output_filename.rfind(".bin")]+"_d3.mat"
try:
sio.savemat(d3a_filename, {"d3_analysed": d3a})
except:
print "Error: failed to save sim d3 trajectory file, " + d3a_filename
| bsd-3-clause |
DOV-Vlaanderen/pydov | orig/dovboringen.py | 1 | 22536 | """
This module handles the selection of borehole data from the DOV webservice.
Its development was made possible by the financing of Vlaio (Flanders,
Belgium) and AGT n.v (www.agt.be).
"""
__author__ = ['Pieter Jan Haest']
__copyright__ = 'Copyright 2017, DOV-Vlaanderen'
__credits__ = ["Stijn Van Hoey", "Johan Van de Wauw"]
__license__ = "MIT"
__version__ = '0.1'
__maintainer__ = "Pieter Jan Haest"
__email__ = "geo.haest@gmail.com"
__status__ = "Development"
from owslib.wfs import WebFeatureService
from owslib import fes
import xmltodict
import pandas as pd
import numpy as np
import os
class DOVVariableError(Exception):
pass
class DovBoringen(object):
"""
Examples
--------
>>> dov = DovBoringen()
>>> # for a downloaded XML file do:
>>> filepth = os.path.join(r'../tests/data','hcov.xml')
>>> # choose between ['hydrogeologischeinterpretatie',
>>> # 'geotechnischecodering', 'gecodeerdelithologie']
>>> df_boringen = dov.get_boringen_data(filepth,
>>> 'hydrogeologischeinterpretatie')
>>> df_boringen.shape
(397, 7)
>>> # if the system of dov.vlaanderen.be allows for in-line querying of
>>> # interpretation data you can do
>>> extracted_locations = dov.get_boringen(
>>> bbox=(160000, 200000, 178100, 215100))
>>> df_boringen = dov.get_boringen_data(extracted_locations,
>>> 'hydrogeologischeinterpretatie')
"""
def __init__(self, url='https://www.dov.vlaanderen.be/geoserver/wfs',
version='2.0.0', layer='dov-pub:Boringen',
maxfeatures=10000, timeout=30, outputformat='xml',
epsg='31370'):
""" Initialize DovBoringen instance to read borehole data for
selected locations in xml
Parameters
----------
url : str
url string
version : str
the version on the wfs_boring
layer : str
the layer with the general borehole data
maxfeatures : int
the maximum number of features that will be obtained from the
wfs_boring
timeout : int
time in seconds after which requests should time-out
outputformat : str
the format that is returned from the wfs_boring
epsg : int
the epsg code in which data should be retrieved. Default value
for Lambert72: 31370
"""
# define general wfs_boring info
self.layer = layer
self.outputformat = 'application/' + outputformat
self.timeout = timeout
self.wfs_boring = WebFeatureService(url=url, version=version)
# check version
self.version = self.wfs_boring.identification.version
# check contents through: self.wfs_boring.contents['dov-pub:Boringen']
self.maxfeatbool = False # a boolean indicating that a limit exists
# on the queryable features
self.srsname = 'urn:x-ogc:def:crs:EPSG:' + epsg
try:
server_maxfeatures = int(
self.wfs_boring.constraints['DefaultMaxFeatures'].values[0])
if server_maxfeatures < maxfeatures:
self.maxfeatures = server_maxfeatures
self.maxfeatbool = True
else:
self.maxfeatures = maxfeatures
except (KeyError, IndexError):
self.maxfeatures = maxfeatures
# define variables
self.interpretations = ['gecodeerde_lithologie',
'geotechnische_codering',
'hydrogeologische_stratigrafie',
'informele_hydrostratigrafie',
'informele_stratigrafie',
'lithologische_beschrijving', ]
self.property_names = ['diepte_tot_m', 'dikte_quartair',
'formele_stratigrafie', 'gecodeerde_lithologie',
'geotechnische_codering',
'hydrogeologische_stratigrafie',
'informele_hydrostratigrafie',
'informele_stratigrafie',
'lithologische_beschrijving', ]
# http://docs.geoserver.org/latest/en/user/filter/filter_reference.html
self.wfs_filters = {'=': fes.PropertyIsEqualTo,
'!=': fes.PropertyIsNotEqualTo,
'<': fes.PropertyIsLessThan,
'<=': fes.PropertyIsLessThanOrEqualTo,
'>': fes.PropertyIsGreaterThan,
'>=': fes.PropertyIsGreaterThanOrEqualTo,
'<<': fes.PropertyIsBetween, }
# define the key-cols to retrieve data from the xml
# this is currently misleading since the elements are hardcoded in
# extract_boringen()
# (renamed) common attributes of the interpretation to keep
interpretation_atts = ['boringid', 'betrouwbaar', 'opdracht',
'laag_van', 'laag_tot']
self.df_cols_dict = {
'boringen': ['boringid', 'x', 'y', 'mv', 'boring_van',
'boring_tot',
'methode'],
'gecodeerdelithologie': interpretation_atts +
['prim_grondsoort', 'sec_grondsoort', 'hoeveelheid',
'plaatselijk'],
'geotechnischecodering': interpretation_atts +
['prim_grondsoort', 'sec_grondsoort'],
'hydrogeologischeinterpretatie': interpretation_atts +
['aquifer', 'regime'], }
def get_boringen(self, query_string='', bbox=None, add_props=[]):
"""Query the wfs_boring for a all boreholes within a selected
bounding box or given constraints.
A dataframe containing the selected variables is returned. The
following variables are always included:
'boornummer', 'fiche', 'X_ml72', 'Y_ml72', 'Z_mTAW', 'methode',
'diepte_tot_m', 'dikte_quartair',
'formele_stratigrafie', 'gecodeerde_lithologie',
'geotechnische_codering', 'hydrogeologische_stratigrafie',
'informele_hydrostratigrafie', 'informele_stratigrafie',
'lithologische_beschrijving'
Additional variables from the xml can be selected by providing the
PropertyName in the list of add_props.
The following variables are remapped to a more readable name in the
resulting dataframe:
{fiche: url_data, X_ml72: x, Y_ml72: y, Z_mTAW: z_mv}.
Parameters
----------
query_string : str
A string containing the query that will be used as constrained in
the WFS call
bbox : tuple of floats
The X, Y coordinates of the bounding box as (xmin, ymin, xmax,
ymax)
add_props : list of strings
A list with the PropertyNames of attributes in the queried layer
that need to be selected in addition
to the default ones
Returns
-------
boringen_df : dataframe
A dataframe with the selected attributes of the selected borehole
locations
"""
# extract data with user-defined column names
user_defined = ['boornummer', 'url_data', 'x', 'y', 'z_mv', 'methode']
dov_defined = ['boornummer', 'fiche', 'X_mL72', 'Y_mL72', 'Z_mTAW',
'methode']
# get list with queried properties (different for version 1.1.0 and
# 2.0.0):
variables1 = dov_defined + self.property_names + add_props
variables2 = ['dov-pub:' + x for x in variables1]
propertynames = variables1 if self.version == '1.1.0' else variables2
# query the wfs layer for the given constraints in the bounding box
# filterxml = self.compose_query(query_string, bbox, self.wfs_filters)
response = self.wfs_boring.getfeature(typename=self.layer,
propertyname=propertynames,
# gives service exception for
# version 1.1.0
maxfeatures=self.maxfeatures,
# filter=filterxml,
# took ages to query, bbox is
# faster
bbox=bbox,
# outputFormat=self.outputformat
# xml is only option and
# possible error
# srsname=self.srsname # does
# not work for version 2.0.0
)
if self.version == '1.1.0':
boringen_df = pd.DataFrame(
self.parse_wfs(response, self.layer, self.version),
columns=variables1
).rename(columns=dict(zip(dov_defined, user_defined)))
elif self.version == '2.0.0':
boringen_df = pd.DataFrame(
self.parse_wfs(response, self.layer, self.version),
columns=variables2
).rename(columns=dict(
zip(['dov-pub:' + x for x in dov_defined], user_defined)))
return boringen_df
@staticmethod
def parse_wfs(response, layer, version):
"""A generator to parse the response from a wfs, depending on the
server version
Parameters
----------
response : StringIO
The response from a wfs.getfeature() query (OWSlib)
layer : str
The wfs layer that is queried
version : str
The version of the WFS server: only '1.1.0' and '2.0.0'
"""
if version == "1.1.0":
# convert layer preposition to null
layer = 'null:' + layer.split(':')[1]
# convert the response to a dictionary
doc = xmltodict.parse(response)
# yield the layers of the dict
for a in doc['wfs:FeatureCollection']['gml:featureMembers']:
yield (a[layer])
elif version == "2.0.0":
# convert the response to a dictionary
doc = xmltodict.parse(response.read())
# yield the layers of the dict
for a in doc['wfs:FeatureCollection']['wfs:member']:
yield (a[layer])
@staticmethod
def compose_query(query_string, bbox, wfs_filters):
"""Compose a wfs filter query from a string
The query string should be composed as: "property_name operator
literal". The property names and operators are
initialized with the DovBoringen class.
Multiple filters can be added by comma separation e.g.:
"property_name1 operator1 literal1, property_name2 operator2 literal2"
The PropertyIsBetween operator requires a lower and upper boundary,
it is given by a tuple in the string, e.g.:
"diepte_tot_m << (20,100)"
Parameters
----------
query_string : str
A string containing the query that will be used as constrained in
the WFS call. See also: get_boringen()
bbox : tuple of floats, or empty tuple
The X, Y coordinates of the bounding box as (xmin, ymin, xmax,
ymax)
wfs_filters : dict
A dictionary mapping the operator in the query string to the
comparison operator of the wfs call
Returns
-------
filterxml : str
A string of the xml constraint for a wfs call using owslib
"""
filters = []
# extract criteria
if query_string:
query_raw = [x.strip(' ,') for x in query_string.split(' ')]
if len(query_raw) % 3 != 0:
raise ValueError('The query string is not correct. '
'It should be composed of "property operator '
'literal"')
idx = 1
for fltr in query_raw[1::3]:
if fltr != '<<':
filters.append(
wfs_filters[fltr](propertyname=query_raw[idx - 1],
literal=query_raw[idx + 1]))
else:
                    lb, ub = [x.strip('()') for x in
                              query_raw[idx + 1].split(',')]
filters.append(
wfs_filters[fltr](propertyname=query_raw[idx - 1],
lowerboundary=lb, upperboundary=ub))
idx += 3
if bbox:
filters.append(fes.BBox(bbox))
if len(filters) == 1:
filter = fes.FilterRequest().setConstraint(filters[0])
elif len(filters) > 1:
# only logical AND is evaluated (constraint = [[a, b]])
filter = fes.FilterRequest().setConstraintList([filters])
else:
return ''
filterxml = fes.etree.tostring(filter, encoding="utf-8", method='xml')
return filterxml
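    # Illustrative sketch: the query-string grammar described above would be
    # used like this (bbox optional); the output is the XML constraint string
    # passed to the owslib getfeature call:
    #
    #     dov = DovBoringen()
    #     xml = dov.compose_query('diepte_tot_m > 20',
    #                             (160000, 200000, 178100, 215100),
    #                             dov.wfs_filters)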
@staticmethod
def extract_boringen_urls(urls, interpretation, *args):
"""
"""
# Don't know if a generator will work here (since you need to combine
# multiple levels from 'boringen' and
# 'interpretations' --> to be checked once dov.vlaanderen.be supports
# this querying of xmls for interpretations
print('This option is not supported yet')
return None
def extract_boringen_file(self, file, interpretation):
"""Extract the interpretation from the XML file obtained from
dov.vlaanderen.be for 'boringen'
Currently only ['hydrogeologischeinterpretatie',
'geotechnischecodering', 'gecodeerdelithologie'] are supported.
Mind that the extraction of elements is hardcoded and not governed by
the self.df_cols_dict. This could be an
improvement of the code if anyone knows how to construct the xml tree
structure from lists.
In addition, multiple layers are supported for the 'boringen' and
'interpretation' by joining the data where
'laag_van' >= 'boring_van' and 'laag_tot' <= 'boring_tot' for each
boring.
Parameters
----------
file : str
The path where the xml file is located
interpretation: str
The interpretation that should be extracted from the XML file
Returns
-------
result : pd.DataFrame
A dataframe with the attributes of the boringen and the
interpretation defined by self.df_cols_dict
"""
with open(file) as fd:
xml_data = xmltodict.parse(fd.read())
tmp = []
for loc in xml_data['ns2:dov-schema']['boring']:
# sometimes multiple methods during one drilling in xml
if isinstance(loc['details']['boormethode'], list):
for met in loc['details']['boormethode']:
tmp.append([loc['identificatie'],
float(loc['xy']['x']),
float(loc['xy']['y']),
float(loc['oorspronkelijk_maaiveld'][
'waarde']),
float(met['van']),
float(met['tot']),
met['methode']])
else:
tmp.append([loc['identificatie'],
float(loc['xy']['x']),
float(loc['xy']['y']),
float(
loc['oorspronkelijk_maaiveld']['waarde']),
float(loc['details']['boormethode']['van']),
float(loc['details']['boormethode']['tot']),
loc['details']['boormethode']['methode']])
df_boring = pd.DataFrame(tmp,
columns=self.df_cols_dict['boringen'])
# check if interpretation present
if interpretation not in xml_data['ns2:dov-schema'][
'interpretaties']:
print(
'No ' + interpretation + ' is present in the given XML '
'file')
return None
# else parse the xml
tmp = []
for boring in xml_data['ns2:dov-schema']['interpretaties'][
interpretation]:
if interpretation == 'hydrogeologischeinterpretatie':
for laag in boring['laag']:
tmp.append([boring['boring'],
boring['betrouwbaarheid'],
boring['opdracht'],
float(laag['van']),
float(laag['tot']),
laag['aquifer'],
laag[
'regime'] if 'regime' in laag.keys()
else None])
df_interpr = pd.DataFrame(tmp, columns=self.df_cols_dict[
interpretation])
result = pd.DataFrame(
columns=self.df_cols_dict['boringen'] +
self.df_cols_dict[interpretation][1:])
elif interpretation == 'gecodeerdelithologie':
for laag in boring['laag']:
tmp.append([boring['boring'],
boring['betrouwbaarheid'],
boring['opdracht'],
float(laag['van']),
float(laag['tot']),
laag['hoofdnaam']['grondsoort'],
laag['bijmenging']['grondsoort'],
laag['bijmenging']['hoeveelheid'],
laag['bijmenging']['plaatselijk']])
df_interpr = pd.DataFrame(tmp,
columns=self.df_cols_dict[
interpretation])
result = pd.DataFrame(
columns=self.df_cols_dict['boringen'] +
self.df_cols_dict[interpretation][1:])
elif interpretation == 'geotechnischecodering':
for laag in boring['laag']:
tmp.append([boring['boring'],
boring['betrouwbaarheid'],
boring['opdracht'],
float(laag['van']),
float(laag['tot']),
laag['hoofdnaam']['grondsoort'],
laag['bijmenging']['grondsoort']])
df_interpr = pd.DataFrame(tmp,
columns=self.df_cols_dict[
interpretation])
result = pd.DataFrame(
columns=self.df_cols_dict['boringen'] +
self.df_cols_dict[interpretation][1:])
# group boringen and interpretation
boringen = df_boring.groupby('boringid')
for boring in boringen:
for level in boring[1]['boring_van'].unique():
idx_interpr = np.where((df_interpr['boringid'] == boring[0]) &
(df_interpr['laag_van'] >=
boring[1]['boring_van'].values[0]) &
(df_interpr['laag_tot'] <=
boring[1]['boring_tot'].values[0]))
result = result.append(
pd.merge(boring[1], df_interpr.ix[idx_interpr]))
return result
def get_boringen_data(self, boringen, interpretation):
"""Retrieve the data from the boringen of an on-line xml query or
downloaded xml file
Parameters
----------
boringen : pd.DataFrame or str
The pointer to the xml datasource: on-line as a pd.DataFrame from
self.get_boringen(), or from an XML
file downloaded from dov.vlaanderen.be (for boringen)
interpretation : str
The interpretation one would like to extract from the XML file
Returns
-------
result : pd.DataFrame
A dataframe with the attributes of the boringen and the
interpretation defined by self.df_cols_dict
"""
if isinstance(boringen, pd.DataFrame):
data_boringen = pd.DataFrame(list(self.extract_boringen_urls(
boringen[boringen[interpretation] == 'true'][
'url_data'].values)
),
columns=self.df_cols_dict[interpretation]
)
elif isinstance(boringen, str):
data_boringen = pd.DataFrame(
self.extract_boringen_file(boringen, interpretation),
columns=self.df_cols_dict[interpretation]
)
return data_boringen
if __name__ == '__main__':
dov = DovBoringen(maxfeatures=10)
query_str = 'diepte_tot_m > 20'
# extracted_locations = dov.get_boringen(query_string=query_str,
# bbox=(160000, 200000, 178100, 215100))
# df_boringen = dov.get_boringen_data(extracted_locations) # currently
# not supported by dov.vlaanderen.be
path_to_test = os.path.abspath(os.path.join(__file__, '../..'))
filepth = os.path.join(path_to_test, 'tests', 'data', 'hcov.xml')
# ['hydrogeologischeinterpretatie', 'geotechnischecodering',
# 'gecodeerdelithologie']
df_boringen = dov.get_boringen_data(filepth,
'hydrogeologischeinterpretatie')
| mit |
paranoya/Sexplorer | koala_focus.py | 1 | 11796 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Python script to estimate AAT focus position using 2 FWHM values
# By Angel R. Lopez-Sanchez (AAO/MQU)
# Version 1.1 - 27 Feb 2018
import sys
import numpy as np
import matplotlib
matplotlib.use("TkAgg")  # backend must be selected before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
# Tkinter is for python 2; tkinter is for python 3
if sys.version_info[0] < 3:
import Tkinter as tk
import tkMessageBox, tkFileDialog
else:
import tkinter as tk
from tkinter import messagebox as tkMessageBox
from tkinter import filedialog as tkFileDialog
class MainApp(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.parent.title('Focusing KOALA @ AAT')
# call the widgets
self.ComputeButton()
self.FWHMButton()
self.FWHMButton1()
self.FWHMButton2()
self.getdata()
self.canvas()
self.scale=1.0
def fwhmKOALA(self):
self.scale = 1.0
self.FWHMButton.configure(bg ="green")
self.FWHMButton1.configure(bg ="white")
self.FWHMButton2.configure(bg ="white")
print "KOALA scale selected, FWHM given in arcsec"
def fwhmBIN1(self):
self.scale = 0.16
self.FWHMButton.configure(bg ="white")
self.FWHMButton1.configure(bg ="green")
self.FWHMButton2.configure(bg ="white")
print "TV DIRECT binning 1 scale selected, FWHM given in 0.16 arcsec / pix "
def fwhmBIN2(self):
self.scale = 0.32
self.FWHMButton.configure(bg ="white")
self.FWHMButton1.configure(bg ="white")
self.FWHMButton2.configure(bg ="green")
print "TV DIRECT binning 2 scale selected, FWHM given in 0.32 arcsec / pix "
# Compute!
def compute(self):
        focus = []
        fwhm1 = []
        fwhm2 = []
        # Read the 12 entry rows; rows with missing or non-numeric values are skipped.
        for row in range(1, 13):
            try:
                focus.append(float(getattr(self, "FOCUS%d" % row).get()))
                fwhm1.append(float(getattr(self, "FWHM1%d" % row).get()))
                fwhm2.append(float(getattr(self, "FWHM2%d" % row).get()))
            except ValueError:
                print("No data in row %d" % row)
fwhm1 = np.array(fwhm1) * self.scale
fwhm2 = np.array(fwhm2) * self.scale
afwhm=(fwhm1+fwhm2)/2
fwhm=afwhm.tolist()
#fits
a2,a1,a0 = np.polyfit(focus,fwhm,2)
a2b,a1b,a0b = np.polyfit(focus,fwhm1,2)
a2r,a1r,a0r = np.polyfit(focus,fwhm2,2)
xmax=np.nanmax(focus)+0.4
xmin=np.nanmin(focus)-0.4
xx=np.arange(100)/100.* (xmax-xmin)+ xmin
fit = a0 + a1*xx + a2*xx**2
fitb = a0b + a1b*xx + a2b*xx**2
fitr = a0r + a1r*xx + a2r*xx**2
seeing = round(np.nanmin(fit),2)
bestfocus=round((xx[np.where(fit ==np.nanmin(fit))[0]][0]),2)
seeingr = round(np.nanmin(fitr),2)
bestfocusr=round((xx[np.where(fitr ==np.nanmin(fitr))[0]][0]),2)
seeingb = round(np.nanmin(fitb),2)
bestfocusb=round((xx[np.where(fitb ==np.nanmin(fitb))[0]][0]),2)
print " Focus values =",focus
print " FWHM values =",fwhm
print " Best seeing =",seeing,'" b =',seeingb,'" r =',seeingr,'"'
print " Focus position =",bestfocus,"mm b =",bestfocusb,"mm r =",bestfocusr,"mm"
result="Focus position: "
result += str(bestfocus)
result +=" mm Best seeing : "
result += str(seeing)
result +='"'
if self.scale == 1.0:
result +=' Best RED seeing : '
result += str(seeingr)
result +='"'
tbestfocus = tk.Label(self, text=result, background='lightblue')
tbestfocus.grid(column=3, row=14, sticky="nesw")
f = Figure(figsize=(10,8))
a = f.add_subplot(111)
a.plot(focus,fwhm, 'o', ms=20)
a.plot(xx,fit)
a.plot([bestfocus],[seeing],"s", color="green",ms=5)
a.plot([bestfocusb],[seeingb],"s", color="blue",ms=5)
a.plot(xx,fitb, color="blue", alpha=0.3)
a.plot([bestfocusr],[seeingr],"s", color="red",ms=5)
a.plot(xx,fitr, color="red", alpha=0.3)
a.set_xlabel("Focus value [mm]")
a.set_ylabel('FWHM ["]')
self.canvas = FigureCanvasTkAgg(f, master=self)
self.canvas.get_tk_widget().grid(column=3, row=1, rowspan=13, sticky="nesw")
### Compute button
def ComputeButton(self):
self.ComputeButton = tk.Button(self, text='Compute!', command=self.compute)
self.ComputeButton.grid(column=0, row=14, columnspan=3, sticky="nesw")
### FWHM size
def FWHMButton(self):
self.FWHMButton = tk.Button(self, text='KOALA', command=self.fwhmKOALA, bg="green")
self.FWHMButton.grid(column=0, row=13, sticky="nesw")
def FWHMButton1(self):
self.FWHMButton1 = tk.Button(self, text='TV 1x1', command=self.fwhmBIN1, bg="white")
self.FWHMButton1.grid(column=1, row=13, sticky="nesw")
def FWHMButton2(self):
self.FWHMButton2 = tk.Button(self, text='TV 2x2', command=self.fwhmBIN2, bg="white")
self.FWHMButton2.grid(column=2, row=13, sticky="nesw")
# get data
def getdata(self):
self.textFocus = tk.Label(self, text="Focus")
self.textFocus.grid(column=0, row=0, sticky="nesw")
self.textFWHM1 = tk.Label(self, text="FWHM b")
self.textFWHM1.grid(column=1, row=0, sticky="nesw")
self.textFWHM2 = tk.Label(self, text="FWHM r")
self.textFWHM2.grid(column=2, row=0, sticky="nesw")
self.FOCUS1 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS2 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS3 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS4 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS5 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS6 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS7 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS8 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS9 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS10 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS11 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS12 = tk.Entry(self, width=6, bg="yellow")
self.FWHM11 = tk.Entry(self, width=6, bg="yellow")
self.FWHM12 = tk.Entry(self, width=6, bg="yellow")
self.FWHM13 = tk.Entry(self, width=6, bg="yellow")
self.FWHM14 = tk.Entry(self, width=6, bg="yellow")
self.FWHM15 = tk.Entry(self, width=6, bg="yellow")
self.FWHM16 = tk.Entry(self, width=6, bg="yellow")
self.FWHM17 = tk.Entry(self, width=6, bg="yellow")
self.FWHM18 = tk.Entry(self, width=6, bg="yellow")
self.FWHM19 = tk.Entry(self, width=6, bg="yellow")
self.FWHM110 = tk.Entry(self, width=6, bg="yellow")
self.FWHM111 = tk.Entry(self, width=6, bg="yellow")
self.FWHM112 = tk.Entry(self, width=6, bg="yellow")
self.FWHM21 = tk.Entry(self, width=6, bg="yellow")
self.FWHM22 = tk.Entry(self, width=6, bg="yellow")
self.FWHM23 = tk.Entry(self, width=6, bg="yellow")
self.FWHM24 = tk.Entry(self, width=6, bg="yellow")
self.FWHM25 = tk.Entry(self, width=6, bg="yellow")
self.FWHM26 = tk.Entry(self, width=6, bg="yellow")
self.FWHM27 = tk.Entry(self, width=6, bg="yellow")
self.FWHM28 = tk.Entry(self, width=6, bg="yellow")
self.FWHM29 = tk.Entry(self, width=6, bg="yellow")
self.FWHM210 = tk.Entry(self, width=6, bg="yellow")
self.FWHM211 = tk.Entry(self, width=6, bg="yellow")
self.FWHM212 = tk.Entry(self, width=6, bg="yellow")
self.FOCUS1.grid(column=0, row=1, sticky="nesw")
self.FOCUS2.grid(column=0, row=2, sticky="nesw")
self.FOCUS3.grid(column=0, row=3, sticky="nesw")
self.FOCUS4.grid(column=0, row=4, sticky="nesw")
self.FOCUS5.grid(column=0, row=5, sticky="nesw")
self.FOCUS6.grid(column=0, row=6, sticky="nesw")
self.FOCUS7.grid(column=0, row=7, sticky="nesw")
self.FOCUS8.grid(column=0, row=8, sticky="nesw")
self.FOCUS9.grid(column=0, row=9, sticky="nesw")
self.FOCUS10.grid(column=0, row=10, sticky="nesw")
self.FOCUS11.grid(column=0, row=11, sticky="nesw")
self.FOCUS12.grid(column=0, row=12, sticky="nesw")
self.FWHM11.grid(column=1, row=1, sticky="nesw")
self.FWHM12.grid(column=1, row=2, sticky="nesw")
self.FWHM13.grid(column=1, row=3, sticky="nesw")
self.FWHM14.grid(column=1, row=4, sticky="nesw")
self.FWHM15.grid(column=1, row=5, sticky="nesw")
self.FWHM16.grid(column=1, row=6, sticky="nesw")
self.FWHM17.grid(column=1, row=7, sticky="nesw")
self.FWHM18.grid(column=1, row=8, sticky="nesw")
self.FWHM19.grid(column=1, row=9, sticky="nesw")
self.FWHM110.grid(column=1, row=10, sticky="nesw")
self.FWHM111.grid(column=1, row=11, sticky="nesw")
self.FWHM112.grid(column=1, row=12, sticky="nesw")
self.FWHM21.grid(column=2, row=1, sticky="nesw")
self.FWHM22.grid(column=2, row=2, sticky="nesw")
self.FWHM23.grid(column=2, row=3, sticky="nesw")
self.FWHM24.grid(column=2, row=4, sticky="nesw")
self.FWHM25.grid(column=2, row=5, sticky="nesw")
self.FWHM26.grid(column=2, row=6, sticky="nesw")
self.FWHM27.grid(column=2, row=7, sticky="nesw")
self.FWHM28.grid(column=2, row=8, sticky="nesw")
self.FWHM29.grid(column=2, row=9, sticky="nesw")
self.FWHM210.grid(column=2, row=10, sticky="nesw")
self.FWHM211.grid(column=2, row=11, sticky="nesw")
self.FWHM212.grid(column=2, row=12, sticky="nesw")
# Canvas
def canvas(self):
self.f = Figure(figsize=(10,8))
self.canvas = FigureCanvasTkAgg(self.f, master=self)
self.canvas.get_tk_widget().grid(column=3, row=1, rowspan=13, sticky="nesw")
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.parent)
if __name__ == "__main__":
root = tk.Tk()
# root.geometry("1000x730+10+10")
root.resizable(0, 0)
MainApp(root).pack(side=tk.TOP)
root.mainloop()
| mit |
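At its core, compute() in the script above does a quadratic fit of FWHM against telescope focus position and reads the best focus off the minimum of the fitted parabola. A minimal standalone sketch of that step; the focus/FWHM values below are invented for illustration.

import numpy as np

# Illustrative focus sweep: focus positions (mm) and measured FWHM (arcsec).
focus = np.array([37.0, 37.2, 37.4, 37.6, 37.8])
fwhm = np.array([2.1, 1.7, 1.5, 1.6, 2.0])

# Fit FWHM(focus) with a parabola, as compute() does with np.polyfit.
a2, a1, a0 = np.polyfit(focus, fwhm, 2)

# Evaluate the fit on a fine grid and take its minimum: best focus and expected seeing.
xx = np.linspace(focus.min() - 0.4, focus.max() + 0.4, 100)
fit = a0 + a1 * xx + a2 * xx ** 2
print("Best focus = %.2f mm, seeing = %.2f arcsec" % (xx[np.argmin(fit)], fit.min()))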
dsm054/pandas | asv_bench/benchmarks/io/excel.py | 5 | 1173 | import numpy as np
from pandas import DataFrame, date_range, ExcelWriter, read_excel
from pandas.compat import BytesIO
import pandas.util.testing as tm
class Excel(object):
params = ['openpyxl', 'xlsxwriter', 'xlwt']
param_names = ['engine']
def setup(self, engine):
N = 2000
C = 5
self.df = DataFrame(np.random.randn(N, C),
columns=['float{}'.format(i) for i in range(C)],
index=date_range('20000101', periods=N, freq='H'))
self.df['object'] = tm.makeStringIndex(N)
self.bio_read = BytesIO()
self.writer_read = ExcelWriter(self.bio_read, engine=engine)
self.df.to_excel(self.writer_read, sheet_name='Sheet1')
self.writer_read.save()
self.bio_read.seek(0)
def time_read_excel(self, engine):
read_excel(self.bio_read)
def time_write_excel(self, engine):
bio_write = BytesIO()
bio_write.seek(0)
writer_write = ExcelWriter(bio_write, engine=engine)
self.df.to_excel(writer_write, sheet_name='Sheet1')
writer_write.save()
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
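The benchmark class above follows the asv convention of a parameterised setup() plus time_* methods. Outside of asv it can be driven by hand; a minimal sketch, assuming the chosen engine (openpyxl here) is installed.

# Hypothetical manual run of the Excel benchmark above (asv normally calls
# setup()/time_*() itself for every value in `params`).
bench = Excel()
bench.setup("openpyxl")              # build the DataFrame and an in-memory workbook
bench.time_write_excel("openpyxl")   # timed path: write the DataFrame to a fresh BytesIO
bench.time_read_excel("openpyxl")    # timed path: read the pre-written workbook back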
datapythonista/pandas | pandas/io/formats/style.py | 1 | 91976 | """
Module for applying conditional formatting to DataFrames and Series.
"""
from __future__ import annotations
from contextlib import contextmanager
import copy
from functools import partial
import operator
from typing import (
Any,
Callable,
Hashable,
Sequence,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._typing import (
Axis,
FilePathOrBuffer,
FrameOrSeries,
FrameOrSeriesUnion,
IndexLabel,
Scalar,
)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc
import pandas as pd
from pandas import RangeIndex
from pandas.api.types import is_list_like
from pandas.core import generic
import pandas.core.common as com
from pandas.core.frame import (
DataFrame,
Series,
)
from pandas.core.generic import NDFrame
from pandas.io.formats.format import save_to_buffer
jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
from pandas.io.formats.style_render import (
CSSProperties,
CSSStyles,
StylerRenderer,
Subset,
Tooltips,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
try:
from matplotlib import colors
import matplotlib.pyplot as plt
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func: Callable):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler(StylerRenderer):
r"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
Data to be styled - either a Series or DataFrame.
precision : int
Precision to round floats to, defaults to pd.options.display.precision.
table_styles : list-like, default None
List of {selector: (attr, value)} dicts; see Notes.
uuid : str, default None
A unique identifier to avoid CSS collisions; generated automatically.
caption : str, default None
Caption to attach to the table.
table_attributes : str, default None
Items that show up in the opening ``<table>`` tag
in addition to automatic (by default) id.
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied.
.. versionadded:: 1.0.0
uuid_len : int, default 5
If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate
expressed in hex characters, in range [0, 32].
.. versionadded:: 1.2.0
decimal : str, default "."
Character used as decimal separator for floats, complex and integers
.. versionadded:: 1.3.0
thousands : str, optional, default None
Character used as thousands separator for floats, complex and integers
.. versionadded:: 1.3.0
escape : str, optional
Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
in cell display string with HTML-safe sequences.
Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
LaTeX-safe sequences.
        .. versionadded:: 1.3.0
Attributes
----------
env : Jinja2 jinja2.Environment
template : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
DataFrame.style : Return a Styler object containing methods for building
a styled HTML representation for the DataFrame.
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
"""
def __init__(
self,
data: FrameOrSeriesUnion,
precision: int | None = None,
table_styles: CSSStyles | None = None,
uuid: str | None = None,
caption: str | None = None,
table_attributes: str | None = None,
cell_ids: bool = True,
na_rep: str | None = None,
uuid_len: int = 5,
decimal: str = ".",
thousands: str | None = None,
escape: str | None = None,
):
super().__init__(
data=data,
uuid=uuid,
uuid_len=uuid_len,
table_styles=table_styles,
table_attributes=table_attributes,
caption=caption,
cell_ids=cell_ids,
)
# validate ordered args
self.precision = precision # can be removed on set_precision depr cycle
self.na_rep = na_rep # can be removed on set_na_rep depr cycle
self.format(
formatter=None,
precision=precision,
na_rep=na_rep,
escape=escape,
decimal=decimal,
thousands=thousands,
)
def _repr_html_(self) -> str:
"""
Hooks into Jupyter notebook rich display system.
"""
return self.render()
def render(
self,
sparse_index: bool | None = None,
sparse_columns: bool | None = None,
**kwargs,
) -> str:
"""
Render the ``Styler`` including all applied styles to HTML.
Parameters
----------
sparse_index : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.index`` value.
sparse_columns : bool, optional
            Whether to sparsify the display of hierarchical columns. Setting to False
            will display each explicit level element in a hierarchical key for each column.
Defaults to ``pandas.options.styler.sparse.columns`` value.
**kwargs
Any additional keyword arguments are passed
through to ``self.template.render``.
This is useful when you need to provide
additional variables for a custom template.
Returns
-------
rendered : str
The rendered HTML.
Notes
-----
Styler objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* table_styles
* caption
* table_attributes
"""
if sparse_index is None:
sparse_index = get_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = get_option("styler.sparse.columns")
return self._render_html(sparse_index, sparse_columns, **kwargs)
def set_tooltips(
self,
ttips: DataFrame,
props: CSSProperties | None = None,
css_class: str | None = None,
) -> Styler:
"""
Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips.
These string based tooltips are only applicable to ``<td>`` HTML elements,
and cannot be used for column or index headers.
.. versionadded:: 1.3.0
Parameters
----------
ttips : DataFrame
DataFrame containing strings that will be translated to tooltips, mapped
by identical column and index values that must exist on the underlying
Styler data. None, NaN values, and empty strings will be ignored and
not affect the rendered HTML.
props : list-like or str, optional
List of (attr, value) tuples or a valid CSS string. If ``None`` adopts
the internal default values described in notes.
css_class : str, optional
Name of the tooltip class used in CSS, should conform to HTML standards.
Only useful if integrating tooltips with external CSS. If ``None`` uses the
internal default value 'pd-t'.
Returns
-------
self : Styler
Notes
-----
Tooltips are created by adding `<span class="pd-t"></span>` to each data cell
and then manipulating the table level CSS to attach pseudo hover and pseudo
        after selectors to produce the required results.
The default properties for the tooltip CSS class are:
- visibility: hidden
- position: absolute
- z-index: 1
- background-color: black
- color: white
- transform: translate(-20px, -20px)
The property 'visibility: hidden;' is a key prerequisite to the hover
functionality, and should always be included in any manual properties
specification, using the ``props`` argument.
Tooltips are not designed to be efficient, and can add large amounts of
additional HTML for larger tables, since they also require that ``cell_ids``
is forced to `True`.
Examples
--------
Basic application
>>> df = pd.DataFrame(data=[[0, 1], [2, 3]])
>>> ttips = pd.DataFrame(
... data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index
... )
>>> s = df.style.set_tooltips(ttips).render()
Optionally controlling the tooltip visual display
>>> df.style.set_tooltips(ttips, css_class='tt-add', props=[
... ('visibility', 'hidden'),
... ('position', 'absolute'),
... ('z-index', 1)])
>>> df.style.set_tooltips(ttips, css_class='tt-add',
... props='visibility:hidden; position:absolute; z-index:1;')
"""
if not self.cell_ids:
# tooltips not optimised for individual cell check. requires reasonable
# redesign and more extensive code for a feature that might be rarely used.
raise NotImplementedError(
"Tooltips can only render with 'cell_ids' is True."
)
if not ttips.index.is_unique or not ttips.columns.is_unique:
raise KeyError(
"Tooltips render only if `ttips` has unique index and columns."
)
if self.tooltips is None: # create a default instance if necessary
self.tooltips = Tooltips()
self.tooltips.tt_data = ttips
if props:
self.tooltips.class_properties = props
if css_class:
self.tooltips.class_name = css_class
return self
@doc(
NDFrame.to_excel,
klass="Styler",
storage_options=generic._shared_docs["storage_options"],
)
def to_excel(
self,
excel_writer,
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: str | None = None,
columns: Sequence[Hashable] | None = None,
header: Sequence[Hashable] | bool = True,
index: bool = True,
index_label: IndexLabel | None = None,
startrow: int = 0,
startcol: int = 0,
engine: str | None = None,
merge_cells: bool = True,
encoding: str | None = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: tuple[int, int] | None = None,
) -> None:
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_latex(
self,
buf: FilePathOrBuffer[str] | None = None,
*,
column_format: str | None = None,
position: str | None = None,
position_float: str | None = None,
hrules: bool = False,
label: str | None = None,
caption: str | None = None,
sparse_index: bool | None = None,
sparse_columns: bool | None = None,
multirow_align: str = "c",
multicol_align: str = "r",
siunitx: bool = False,
encoding: str | None = None,
):
r"""
Write Styler to a file, buffer or string in LaTeX format.
.. versionadded:: 1.3.0
Parameters
----------
buf : str, Path, or StringIO-like, optional, default None
Buffer to write to. If ``None``, the output is returned as a string.
column_format : str, optional
The LaTeX column specification placed in location:
\\begin{tabular}{<column_format>}
            Defaults to 'l' for index and non-numeric data columns, and to 'r'
            for numeric data columns ('S' if ``siunitx`` is ``True``).
position : str, optional
The LaTeX positional argument (e.g. 'h!') for tables, placed in location:
\\begin{table}[<position>]
position_float : {"centering", "raggedleft", "raggedright"}, optional
The LaTeX float command placed in location:
\\begin{table}[<position>]
\\<position_float>
hrules : bool, default False
Set to `True` to add \\toprule, \\midrule and \\bottomrule from the
{booktabs} LaTeX package.
label : str, optional
The LaTeX label included as: \\label{<label>}.
This is used with \\ref{<label>} in the main .tex file.
caption : str, optional
The LaTeX table caption included as: \\caption{<caption>}.
sparse_index : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.index`` value.
sparse_columns : bool, optional
            Whether to sparsify the display of hierarchical columns. Setting to False
            will display each explicit level element in a hierarchical key for each column.
Defaults to ``pandas.options.styler.sparse.columns`` value.
multirow_align : {"c", "t", "b"}
If sparsifying hierarchical MultiIndexes whether to align text centrally,
at the top or bottom.
multicol_align : {"r", "c", "l"}
If sparsifying hierarchical MultiIndex columns whether to align text at
the left, centrally, or at the right.
siunitx : bool, default False
Set to ``True`` to structure LaTeX compatible with the {siunitx} package.
encoding : str, default "utf-8"
Character encoding setting.
Returns
-------
str or None
If `buf` is None, returns the result as a string. Otherwise returns `None`.
See Also
--------
Styler.format: Format the text display value of cells.
Notes
-----
**Latex Packages**
For the following features we recommend the following LaTeX inclusions:
===================== ==========================================================
Feature Inclusion
===================== ==========================================================
sparse columns none: included within default {tabular} environment
sparse rows \\usepackage{multirow}
hrules \\usepackage{booktabs}
colors \\usepackage[table]{xcolor}
siunitx \\usepackage{siunitx}
bold (with siunitx) | \\usepackage{etoolbox}
| \\robustify\\bfseries
| \\sisetup{detect-all = true} *(within {document})*
italic (with siunitx) | \\usepackage{etoolbox}
| \\robustify\\itshape
| \\sisetup{detect-all = true} *(within {document})*
===================== ==========================================================
**Cell Styles**
LaTeX styling can only be rendered if the accompanying styling functions have
been constructed with appropriate LaTeX commands. All styling
functionality is built around the concept of a CSS ``(<attribute>, <value>)``
pair (see `Table Visualization <../../user_guide/style.ipynb>`_), and this
should be replaced by a LaTeX
``(<command>, <options>)`` approach. Each cell will be styled individually
using nested LaTeX commands with their accompanied options.
For example the following code will highlight and bold a cell in HTML-CSS:
>>> df = pd.DataFrame([[1,2], [3,4]])
>>> s = df.style.highlight_max(axis=None,
... props='background-color:red; font-weight:bold;')
>>> s.render()
The equivalent using LaTeX only commands is the following:
>>> s = df.style.highlight_max(axis=None,
... props='cellcolor:{red}; bfseries: ;')
>>> s.to_latex()
Internally these structured LaTeX ``(<command>, <options>)`` pairs
are translated to the
``display_value`` with the default structure:
``\<command><options> <display_value>``.
Where there are multiple commands the latter is nested recursively, so that
        the above example highlighted cell is rendered as
``\cellcolor{red} \bfseries 4``.
Occasionally this format does not suit the applied command, or
combination of LaTeX packages that is in use, so additional flags can be
added to the ``<options>``, within the tuple, to result in different
positions of required braces (the **default** being the same as ``--nowrap``):
=================================== ============================================
Tuple Format Output Structure
=================================== ============================================
(<command>,<options>) \\<command><options> <display_value>
(<command>,<options> ``--nowrap``) \\<command><options> <display_value>
(<command>,<options> ``--rwrap``) \\<command><options>{<display_value>}
(<command>,<options> ``--wrap``) {\\<command><options> <display_value>}
(<command>,<options> ``--lwrap``) {\\<command><options>} <display_value>
(<command>,<options> ``--dwrap``) {\\<command><options>}{<display_value>}
=================================== ============================================
For example the `textbf` command for font-weight
should always be used with `--rwrap` so ``('textbf', '--rwrap')`` will render a
working cell, wrapped with braces, as ``\textbf{<display_value>}``.
A more comprehensive example is as follows:
>>> df = pd.DataFrame([[1, 2.2, "dogs"], [3, 4.4, "cats"], [2, 6.6, "cows"]],
... index=["ix1", "ix2", "ix3"],
... columns=["Integers", "Floats", "Strings"])
>>> s = df.style.highlight_max(
... props='cellcolor:[HTML]{FFFF00}; color:{red};'
... 'textit:--rwrap; textbf:--rwrap;'
... )
>>> s.to_latex()
.. figure:: ../../_static/style/latex_1.png
**Table Styles**
Internally Styler uses its ``table_styles`` object to parse the
``column_format``, ``position``, ``position_float``, and ``label``
input arguments. These arguments are added to table styles in the format:
.. code-block:: python
set_table_styles([
{"selector": "column_format", "props": f":{column_format};"},
{"selector": "position", "props": f":{position};"},
{"selector": "position_float", "props": f":{position_float};"},
{"selector": "label", "props": f":{{{label.replace(':','§')}}};"}
], overwrite=False)
Exception is made for the ``hrules`` argument which, in fact, controls all three
commands: ``toprule``, ``bottomrule`` and ``midrule`` simultaneously. Instead of
setting ``hrules`` to ``True``, it is also possible to set each
individual rule definition, by manually setting the ``table_styles``,
for example below we set a regular ``toprule``, set an ``hline`` for
``bottomrule`` and exclude the ``midrule``:
.. code-block:: python
set_table_styles([
{'selector': 'toprule', 'props': ':toprule;'},
{'selector': 'bottomrule', 'props': ':hline;'},
], overwrite=False)
If other ``commands`` are added to table styles they will be detected, and
positioned immediately above the '\\begin{tabular}' command. For example to
add odd and even row coloring, from the {colortbl} package, in format
``\rowcolors{1}{pink}{red}``, use:
.. code-block:: python
set_table_styles([
{'selector': 'rowcolors', 'props': ':{1}{pink}{red};'}
], overwrite=False)
A more comprehensive example using these arguments is as follows:
>>> df.columns = pd.MultiIndex.from_tuples([
... ("Numeric", "Integers"),
... ("Numeric", "Floats"),
... ("Non-Numeric", "Strings")
... ])
>>> df.index = pd.MultiIndex.from_tuples([
... ("L0", "ix1"), ("L0", "ix2"), ("L1", "ix3")
... ])
>>> s = df.style.highlight_max(
... props='cellcolor:[HTML]{FFFF00}; color:{red}; itshape:; bfseries:;'
... )
>>> s.to_latex(
... column_format="rrrrr", position="h", position_float="centering",
... hrules=True, label="table:5", caption="Styled LaTeX Table",
... multirow_align="t", multicol_align="r"
... )
.. figure:: ../../_static/style/latex_2.png
**Formatting**
To format values :meth:`Styler.format` should be used prior to calling
`Styler.to_latex`, as well as other methods such as :meth:`Styler.hide_index`
or :meth:`Styler.hide_columns`, for example:
>>> s.clear()
>>> s.table_styles = []
>>> s.caption = None
>>> s.format({
... ("Numeric", "Integers"): '\${}',
... ("Numeric", "Floats"): '{:.3f}',
... ("Non-Numeric", "Strings"): str.upper
... })
>>> s.to_latex()
\begin{tabular}{llrrl}
{} & {} & \multicolumn{2}{r}{Numeric} & {Non-Numeric} \\
{} & {} & {Integers} & {Floats} & {Strings} \\
        \multirow[c]{2}{*}{L0} & ix1 & \$1 & 2.200 & DOGS \\
& ix2 & \$3 & 4.400 & CATS \\
L1 & ix3 & \$2 & 6.600 & COWS \\
\end{tabular}
"""
table_selectors = (
[style["selector"] for style in self.table_styles]
if self.table_styles is not None
else []
)
if column_format is not None:
# add more recent setting to table_styles
self.set_table_styles(
[{"selector": "column_format", "props": f":{column_format}"}],
overwrite=False,
)
elif "column_format" in table_selectors:
pass # adopt what has been previously set in table_styles
else:
# create a default: set float, complex, int cols to 'r' ('S'), index to 'l'
_original_columns = self.data.columns
self.data.columns = RangeIndex(stop=len(self.data.columns))
numeric_cols = self.data._get_numeric_data().columns.to_list()
self.data.columns = _original_columns
column_format = "" if self.hidden_index else "l" * self.data.index.nlevels
for ci, _ in enumerate(self.data.columns):
if ci not in self.hidden_columns:
column_format += (
("r" if not siunitx else "S") if ci in numeric_cols else "l"
)
self.set_table_styles(
[{"selector": "column_format", "props": f":{column_format}"}],
overwrite=False,
)
if position:
self.set_table_styles(
[{"selector": "position", "props": f":{position}"}],
overwrite=False,
)
if position_float:
if position_float not in ["raggedright", "raggedleft", "centering"]:
raise ValueError(
f"`position_float` should be one of "
f"'raggedright', 'raggedleft', 'centering', "
f"got: '{position_float}'"
)
self.set_table_styles(
[{"selector": "position_float", "props": f":{position_float}"}],
overwrite=False,
)
if hrules:
self.set_table_styles(
[
{"selector": "toprule", "props": ":toprule"},
{"selector": "midrule", "props": ":midrule"},
{"selector": "bottomrule", "props": ":bottomrule"},
],
overwrite=False,
)
if label:
self.set_table_styles(
[{"selector": "label", "props": f":{{{label.replace(':', '§')}}}"}],
overwrite=False,
)
if caption:
self.set_caption(caption)
if sparse_index is None:
sparse_index = get_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = get_option("styler.sparse.columns")
latex = self._render_latex(
sparse_index=sparse_index,
sparse_columns=sparse_columns,
multirow_align=multirow_align,
multicol_align=multicol_align,
)
return save_to_buffer(latex, buf=buf, encoding=encoding)
def to_html(
self,
buf: FilePathOrBuffer[str] | None = None,
*,
table_uuid: str | None = None,
table_attributes: str | None = None,
encoding: str | None = None,
doctype_html: bool = False,
exclude_styles: bool = False,
):
"""
Write Styler to a file, buffer or string in HTML-CSS format.
.. versionadded:: 1.3.0
Parameters
----------
buf : str, Path, or StringIO-like, optional, default None
Buffer to write to. If ``None``, the output is returned as a string.
table_uuid: str, optional
Id attribute assigned to the <table> HTML element in the format:
``<table id="T_<table_uuid>" ..>``
If not given uses Styler's initially assigned value.
table_attributes: str, optional
Attributes to assign within the `<table>` HTML element in the format:
``<table .. <table_attributes> >``
If not given defaults to Styler's preexisting value.
encoding : str, optional
Character encoding setting for file output, and HTML meta tags,
defaults to "utf-8" if None.
doctype_html : bool, default False
Whether to output a fully structured HTML file including all
HTML elements, or just the core ``<style>`` and ``<table>`` elements.
exclude_styles : bool, default False
Whether to include the ``<style>`` element and all associated element
``class`` and ``id`` identifiers, or solely the ``<table>`` element without
styling identifiers.
Returns
-------
str or None
If `buf` is None, returns the result as a string. Otherwise returns `None`.
See Also
--------
DataFrame.to_html: Write a DataFrame to a file, buffer or string in HTML format.
"""
if table_uuid:
self.set_uuid(table_uuid)
if table_attributes:
self.set_table_attributes(table_attributes)
# Build HTML string..
html = self.render(
exclude_styles=exclude_styles,
encoding=encoding if encoding else "utf-8",
doctype_html=doctype_html,
)
return save_to_buffer(
html, buf=buf, encoding=(encoding if buf is not None else None)
)
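    # Editorial sketch (illustrative only) of the method above: with ``buf=None`` the
    # HTML is returned as a string, otherwise it is written to the given path/buffer.
    #
    #   >>> styler = pd.DataFrame([[1, 2]]).style.applymap(lambda v: "color: red;")
    #   >>> html = styler.to_html()                           # returns a str
    #   >>> styler.to_html("styled.html", doctype_html=True)  # doctest: +SKIP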
def set_td_classes(self, classes: DataFrame) -> Styler:
"""
Set the DataFrame of strings added to the ``class`` attribute of ``<td>``
HTML elements.
Parameters
----------
classes : DataFrame
DataFrame containing strings that will be translated to CSS classes,
mapped by identical column and index key values that must exist on the
underlying Styler data. None, NaN values, and empty strings will
be ignored and not affect the rendered HTML.
Returns
-------
self : Styler
See Also
--------
Styler.set_table_styles: Set the table styles included within the ``<style>``
HTML element.
Styler.set_table_attributes: Set the table attributes added to the ``<table>``
HTML element.
Notes
-----
Can be used in combination with ``Styler.set_table_styles`` to define an
internal CSS solution without reference to external CSS files.
Examples
--------
>>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
>>> classes = pd.DataFrame([
... ["min-val red", "", "blue"],
... ["red", None, "blue max-val"]
... ], index=df.index, columns=df.columns)
>>> df.style.set_td_classes(classes)
Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the
underlying,
>>> df = pd.DataFrame([[1,2],[3,4]], index=["a", "b"],
... columns=[["level0", "level0"], ["level1a", "level1b"]])
>>> classes = pd.DataFrame(["min-val"], index=["a"],
... columns=[["level0"],["level1a"]])
>>> df.style.set_td_classes(classes)
Form of the output with new additional css classes,
>>> df = pd.DataFrame([[1]])
>>> css = pd.DataFrame([["other-class"]])
>>> s = Styler(df, uuid="_", cell_ids=False).set_td_classes(css)
>>> s.hide_index().render()
'<style type="text/css"></style>'
'<table id="T__">'
' <thead>'
' <tr><th class="col_heading level0 col0" >0</th></tr>'
' </thead>'
' <tbody>'
' <tr><td class="data row0 col0 other-class" >1</td></tr>'
' </tbody>'
'</table>'
"""
if not classes.index.is_unique or not classes.columns.is_unique:
raise KeyError(
"Classes render only if `classes` has unique index and columns."
)
classes = classes.reindex_like(self.data)
for r, row_tup in enumerate(classes.itertuples()):
for c, value in enumerate(row_tup[1:]):
if not (pd.isna(value) or value == ""):
self.cell_context[(r, c)] = str(value)
return self
def _update_ctx(self, attrs: DataFrame) -> None:
"""
Update the state of the ``Styler`` for data cells.
Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.
Parameters
----------
attrs : DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
if not self.index.is_unique or not self.columns.is_unique:
raise KeyError(
"`Styler.apply` and `.applymap` are not compatible "
"with non-unique index or columns."
)
for cn in attrs.columns:
for rn, c in attrs[[cn]].itertuples():
if not c:
continue
css_list = maybe_convert_css_to_tuples(c)
i, j = self.index.get_loc(rn), self.columns.get_loc(cn)
self.ctx[(i, j)].extend(css_list)
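    # Editorial sketch (illustrative, not part of the public API): once the queued
    # styling functions have run, ``self.ctx`` maps integer (row, col) positions to
    # lists of CSS (property, value) tuples, e.g.
    #
    #   >>> styler = pd.DataFrame([[1, 2]]).style.applymap(lambda v: "color: red;")
    #   >>> _ = styler.render()          # rendering executes the queued functions
    #   >>> dict(styler.ctx)             # doctest: +SKIP
    #   {(0, 0): [('color', 'red')], (0, 1): [('color', 'red')]}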
def _copy(self, deepcopy: bool = False) -> Styler:
styler = Styler(
self.data,
precision=self.precision,
caption=self.caption,
table_attributes=self.table_attributes,
cell_ids=self.cell_ids,
na_rep=self.na_rep,
)
styler.uuid = self.uuid
styler.hidden_index = self.hidden_index
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
styler.table_styles = copy.deepcopy(self.table_styles)
styler.hidden_columns = copy.copy(self.hidden_columns)
styler.cell_context = copy.deepcopy(self.cell_context)
styler.tooltips = copy.deepcopy(self.tooltips)
else:
styler.ctx = self.ctx
styler._todo = self._todo
styler.table_styles = self.table_styles
styler.hidden_columns = self.hidden_columns
styler.cell_context = self.cell_context
styler.tooltips = self.tooltips
return styler
def __copy__(self) -> Styler:
"""
        Shallow copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo) -> Styler:
return self._copy(deepcopy=True)
def clear(self) -> None:
"""
Reset the ``Styler``, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self.tooltips = None
self.cell_context.clear()
self._todo.clear()
self.hidden_index = False
self.hidden_columns = []
# self.format and self.table_styles may be dependent on user
# input in self.__init__()
def _apply(
self,
func: Callable[..., Styler],
axis: Axis | None = 0,
subset: Subset | None = None,
**kwargs,
) -> Styler:
subset = slice(None) if subset is None else subset
subset = non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis, result_type="expand", **kwargs)
result.columns = data.columns
else:
result = func(data, **kwargs)
if not isinstance(result, DataFrame):
if not isinstance(result, np.ndarray):
raise TypeError(
f"Function {repr(func)} must return a DataFrame or ndarray "
f"when passed to `Styler.apply` with axis=None"
)
if not (data.shape == result.shape):
raise ValueError(
f"Function {repr(func)} returned ndarray with wrong shape.\n"
f"Result has shape: {result.shape}\n"
f"Expected shape: {data.shape}"
)
result = DataFrame(result, index=data.index, columns=data.columns)
elif not (
result.index.equals(data.index) and result.columns.equals(data.columns)
):
raise ValueError(
f"Result of {repr(func)} must have identical "
f"index and columns as the input"
)
if result.shape != data.shape:
raise ValueError(
f"Function {repr(func)} returned the wrong shape.\n"
f"Result has shape: {result.shape}\n"
f"Expected shape: {data.shape}"
)
self._update_ctx(result)
return self
def apply(
self,
func: Callable[..., Styler],
axis: Axis | None = 0,
subset: Subset | None = None,
**kwargs,
) -> Styler:
"""
Apply a CSS-styling function column-wise, row-wise, or table-wise.
Updates the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a Series if ``axis`` in [0,1] and return an object
of same length, also with identical index if the object is a Series.
``func`` should take a DataFrame if ``axis`` is ``None`` and return either
an ndarray with the same shape or a DataFrame with identical columns and
index.
.. versionchanged:: 1.3.0
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
**kwargs : dict
Pass along to ``func``.
Returns
-------
self : Styler
See Also
--------
Styler.applymap: Apply a CSS-styling function elementwise.
Notes
-----
The elements of the output of ``func`` should be CSS styles as strings, in the
format 'attribute: value; attribute2: value2; ...' or,
if nothing is to be applied to that element, an empty string or ``None``.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x, color):
... return np.where(x == np.nanmax(x.to_numpy()), f"color: {color};", None)
>>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
>>> df.style.apply(highlight_max, color='red')
>>> df.style.apply(highlight_max, color='blue', axis=1)
>>> df.style.apply(highlight_max, color='green', axis=None)
Using ``subset`` to restrict application to a single column or multiple columns
>>> df.style.apply(highlight_max, color='red', subset="A")
>>> df.style.apply(highlight_max, color='red', subset=["A", "B"])
Using a 2d input to ``subset`` to select rows in addition to columns
        >>> df.style.apply(highlight_max, color='red', subset=([0,1,2], slice(None)))
        >>> df.style.apply(highlight_max, color='red', subset=(slice(0,5,2), "A"))
"""
self._todo.append(
(lambda instance: getattr(instance, "_apply"), (func, axis, subset), kwargs)
)
return self
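    # Editorial note (sketch, not pandas API documentation): ``apply``/``applymap`` are
    # lazy -- they only append a (function, args, kwargs) entry to ``self._todo`` and
    # return ``self`` for chaining; the queued entries are replayed against the data
    # when the Styler is rendered, e.g.
    #
    #   >>> styler = pd.DataFrame([[1, -1]]).style.applymap(
    #   ...     lambda v: "color: red;" if v < 0 else None)
    #   >>> len(styler._todo)            # one queued step, nothing styled yet
    #   1
    #   >>> html = styler.render()       # replaying _todo populates styler.ctx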
def _applymap(
self, func: Callable, subset: Subset | None = None, **kwargs
) -> Styler:
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(
self, func: Callable, subset: Subset | None = None, **kwargs
) -> Styler:
"""
Apply a CSS-styling function elementwise.
Updates the HTML representation with the result.
Parameters
----------
func : function
``func`` should take a scalar and return a scalar.
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
**kwargs : dict
Pass along to ``func``.
Returns
-------
self : Styler
See Also
--------
Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.
Notes
-----
The elements of the output of ``func`` should be CSS styles as strings, in the
format 'attribute: value; attribute2: value2; ...' or,
if nothing is to be applied to that element, an empty string or ``None``.
Examples
--------
>>> def color_negative(v, color):
... return f"color: {color};" if v < 0 else None
>>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
>>> df.style.applymap(color_negative, color='red')
Using ``subset`` to restrict application to a single column or multiple columns
>>> df.style.applymap(color_negative, color='red', subset="A")
>>> df.style.applymap(color_negative, color='red', subset=["A", "B"])
Using a 2d input to ``subset`` to select rows in addition to columns
        >>> df.style.applymap(color_negative, color='red', subset=([0,1,2], slice(None)))
        >>> df.style.applymap(color_negative, color='red', subset=(slice(0,5,2), "A"))
"""
self._todo.append(
(lambda instance: getattr(instance, "_applymap"), (func, subset), kwargs)
)
return self
def where(
self,
cond: Callable,
value: str,
other: str | None = None,
subset: Subset | None = None,
**kwargs,
) -> Styler:
"""
Apply CSS-styles based on a conditional function elementwise.
.. deprecated:: 1.3.0
Updates the HTML representation with a style which is
selected in accordance with the return value of a function.
Parameters
----------
cond : callable
``cond`` should take a scalar, and optional keyword arguments, and return
a boolean.
value : str
Applied when ``cond`` returns true.
other : str
Applied when ``cond`` returns false.
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
**kwargs : dict
Pass along to ``cond``.
Returns
-------
self : Styler
See Also
--------
Styler.applymap: Apply a CSS-styling function elementwise.
Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.
Notes
-----
This method is deprecated.
This method is a convenience wrapper for :meth:`Styler.applymap`, which we
recommend using instead.
The example:
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> def cond(v, limit=4):
... return v > 1 and v != limit
>>> df.style.where(cond, value='color:green;', other='color:red;')
should be refactored to:
>>> def style_func(v, value, other, limit=4):
... cond = v > 1 and v != limit
... return value if cond else other
>>> df.style.applymap(style_func, value='color:green;', other='color:red;')
"""
warnings.warn(
"this method is deprecated in favour of `Styler.applymap()`",
FutureWarning,
stacklevel=2,
)
if other is None:
other = ""
return self.applymap(
lambda val: value if cond(val, **kwargs) else other,
subset=subset,
)
def set_precision(self, precision: int) -> StylerRenderer:
"""
Set the precision used to display values.
.. deprecated:: 1.3.0
Parameters
----------
precision : int
Returns
-------
self : Styler
Notes
-----
        This method is deprecated; see `Styler.format`.
"""
warnings.warn(
"this method is deprecated in favour of `Styler.format(precision=..)`",
FutureWarning,
stacklevel=2,
)
self.precision = precision
return self.format(precision=precision, na_rep=self.na_rep)
def set_table_attributes(self, attributes: str) -> Styler:
"""
Set the table attributes added to the ``<table>`` HTML element.
These are items in addition to automatic (by default) ``id`` attribute.
Parameters
----------
attributes : str
Returns
-------
self : Styler
See Also
--------
Styler.set_table_styles: Set the table styles included within the ``<style>``
HTML element.
Styler.set_td_classes: Set the DataFrame of strings added to the ``class``
attribute of ``<td>`` HTML elements.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self) -> list[tuple[Callable, tuple, dict]]:
"""
Export the styles applied to the current ``Styler``.
Can be applied to a second Styler with ``Styler.use``.
Returns
-------
styles : list
See Also
--------
Styler.use: Set the styles on the current ``Styler``.
"""
return self._todo
def use(self, styles: list[tuple[Callable, tuple, dict]]) -> Styler:
"""
Set the styles on the current ``Styler``.
Possibly uses styles from ``Styler.export``.
Parameters
----------
styles : list
List of style functions.
Returns
-------
self : Styler
See Also
--------
        Styler.export : Export the styles applied to the current ``Styler``.
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid: str) -> Styler:
"""
Set the uuid applied to ``id`` attributes of HTML elements.
Parameters
----------
uuid : str
Returns
-------
self : Styler
Notes
-----
        Almost all HTML elements within the table, including the ``<table>`` element,
        are assigned ``id`` attributes. The format is ``T_uuid_<extra>`` where
``<extra>`` is typically a more specific identifier, such as ``row1_col2``.
"""
self.uuid = uuid
return self
def set_caption(self, caption: str) -> Styler:
"""
Set the text added to a ``<caption>`` HTML element.
Parameters
----------
caption : str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(
self,
table_styles: dict[Any, CSSStyles] | CSSStyles,
axis: int = 0,
overwrite: bool = True,
) -> Styler:
"""
Set the table styles included within the ``<style>`` HTML element.
This function can be used to style the entire table, columns, rows or
specific HTML selectors.
Parameters
----------
table_styles : list or dict
If supplying a list, each individual table_style should be a
dictionary with ``selector`` and ``props`` keys. ``selector``
should be a CSS selector that the style will be applied to
(automatically prefixed by the table's UUID) and ``props``
should be a list of tuples with ``(attribute, value)``.
If supplying a dict, the dict keys should correspond to
column names or index values, depending upon the specified
`axis` argument. These will be mapped to row or col CSS
selectors. MultiIndex values as dict keys should be
in their respective tuple form. The dict values should be
a list as specified in the form with CSS selectors and
props that will be applied to the specified row or column.
.. versionchanged:: 1.2.0
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``). Only used if `table_styles` is
dict.
.. versionadded:: 1.2.0
overwrite : bool, default True
Styles are replaced if `True`, or extended if `False`. CSS
rules are preserved so most recent styles set will dominate
if selectors intersect.
.. versionadded:: 1.2.0
Returns
-------
self : Styler
See Also
--------
Styler.set_td_classes: Set the DataFrame of strings added to the ``class``
attribute of ``<td>`` HTML elements.
Styler.set_table_attributes: Set the table attributes added to the ``<table>``
HTML element.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
Or with CSS strings
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
        ...      'props': 'background-color: yellow; font-size: 1em;'}]
... )
Adding column styling by name
>>> df.style.set_table_styles({
... 'A': [{'selector': '',
... 'props': [('color', 'red')]}],
... 'B': [{'selector': 'td',
        ...     'props': 'color: blue;'}]
... }, overwrite=False)
Adding row styling
>>> df.style.set_table_styles({
... 0: [{'selector': 'td:hover',
... 'props': [('font-size', '25px')]}]
... }, axis=1, overwrite=False)
"""
if isinstance(table_styles, dict):
if axis in [0, "index"]:
obj, idf = self.data.columns, ".col"
else:
obj, idf = self.data.index, ".row"
table_styles = [
{
"selector": str(s["selector"]) + idf + str(idx),
"props": maybe_convert_css_to_tuples(s["props"]),
}
for key, styles in table_styles.items()
for idx in obj.get_indexer_for([key])
for s in styles
]
else:
table_styles = [
{
"selector": s["selector"],
"props": maybe_convert_css_to_tuples(s["props"]),
}
for s in table_styles
]
if not overwrite and self.table_styles is not None:
self.table_styles.extend(table_styles)
else:
self.table_styles = table_styles
return self
def set_na_rep(self, na_rep: str) -> StylerRenderer:
"""
Set the missing data representation on a ``Styler``.
.. versionadded:: 1.0.0
.. deprecated:: 1.3.0
Parameters
----------
na_rep : str
Returns
-------
self : Styler
Notes
-----
This method is deprecated. See `Styler.format()`
"""
warnings.warn(
"this method is deprecated in favour of `Styler.format(na_rep=..)`",
FutureWarning,
stacklevel=2,
)
self.na_rep = na_rep
return self.format(na_rep=na_rep, precision=self.precision)
def hide_index(self) -> Styler:
"""
Hide any indices from rendering.
Returns
-------
self : Styler
"""
self.hidden_index = True
return self
def hide_columns(self, subset: Subset) -> Styler:
"""
Hide columns from rendering.
Parameters
----------
subset : label, array-like, IndexSlice
A valid 1d input or single key along the appropriate axis within
`DataFrame.loc[]`, to limit ``data`` to *before* applying the function.
Returns
-------
self : Styler
"""
subset = non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
hcols = self.columns.get_indexer_for(hidden_df.columns)
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Sequence[int]")
self.hidden_columns = hcols # type: ignore[assignment]
return self
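    # Editorial sketch (illustrative usage of the two hide_* helpers above):
    #
    #   >>> df = pd.DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
    #   >>> styler = df.style.hide_index().hide_columns(["b"])
    #   >>> styler.hidden_index, list(styler.hidden_columns)
    #   (True, [1])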
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@doc(
name="background",
alt="text",
image_prefix="bg",
axis="{0 or 'index', 1 or 'columns', None}",
text_threshold="",
)
def background_gradient(
self,
cmap="PuBu",
low: float = 0,
high: float = 0,
axis: Axis | None = 0,
subset: Subset | None = None,
text_color_threshold: float = 0.408,
vmin: float | None = None,
vmax: float | None = None,
gmap: Sequence | None = None,
) -> Styler:
"""
Color the {name} in a gradient style.
The {name} color is determined according
to the data in each column, row or frame, or by a given
gradient map. Requires matplotlib.
Parameters
----------
cmap : str or colormap
Matplotlib colormap.
low : float
Compress the color range at the low end. This is a multiple of the data
range to extend below the minimum; good values usually in [0, 1],
defaults to 0.
high : float
Compress the color range at the high end. This is a multiple of the data
range to extend above the maximum; good values usually in [0, 1],
defaults to 0.
axis : {axis}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
text_color_threshold : float or int
{text_threshold}
Luminance threshold for determining text color in [0, 1]. Facilitates text
visibility across varying background colors. All text is dark if 0, and
light if 1, defaults to 0.408.
.. versionadded:: 0.24.0
vmin : float, optional
Minimum data value that corresponds to colormap minimum value.
If not specified the minimum value of the data (or gmap) will be used.
.. versionadded:: 1.0.0
vmax : float, optional
Maximum data value that corresponds to colormap maximum value.
If not specified the maximum value of the data (or gmap) will be used.
.. versionadded:: 1.0.0
gmap : array-like, optional
Gradient map for determining the {name} colors. If not supplied
will use the underlying data from rows, columns or frame. If given as an
ndarray or list-like must be an identical shape to the underlying data
considering ``axis`` and ``subset``. If given as DataFrame or Series must
have same index and column labels considering ``axis`` and ``subset``.
If supplied, ``vmin`` and ``vmax`` should be given relative to this
gradient map.
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.{alt}_gradient: Color the {alt} in a gradient style.
Notes
-----
When using ``low`` and ``high`` the range
of the gradient, given by the data if ``gmap`` is not given or by ``gmap``,
is extended at the low end effectively by
`map.min - low * map.range` and at the high end by
`map.max + high * map.range` before the colors are normalized and determined.
If combining with ``vmin`` and ``vmax`` the `map.min`, `map.max` and
`map.range` are replaced by values according to the values derived from
``vmin`` and ``vmax``.
This method will preselect numeric columns and ignore non-numeric columns
unless a ``gmap`` is supplied in which case no preselection occurs.
Examples
--------
>>> df = pd.DataFrame(columns=["City", "Temp (c)", "Rain (mm)", "Wind (m/s)"],
... data=[["Stockholm", 21.6, 5.0, 3.2],
... ["Oslo", 22.4, 13.3, 3.1],
... ["Copenhagen", 24.5, 0.0, 6.7]])
Shading the values column-wise, with ``axis=0``, preselecting numeric columns
>>> df.style.{name}_gradient(axis=0)
.. figure:: ../../_static/style/{image_prefix}_ax0.png
Shading all values collectively using ``axis=None``
>>> df.style.{name}_gradient(axis=None)
.. figure:: ../../_static/style/{image_prefix}_axNone.png
        Compress the color map from both the ``low`` and ``high`` ends
>>> df.style.{name}_gradient(axis=None, low=0.75, high=1.0)
.. figure:: ../../_static/style/{image_prefix}_axNone_lowhigh.png
Manually setting ``vmin`` and ``vmax`` gradient thresholds
>>> df.style.{name}_gradient(axis=None, vmin=6.7, vmax=21.6)
.. figure:: ../../_static/style/{image_prefix}_axNone_vminvmax.png
Setting a ``gmap`` and applying to all columns with another ``cmap``
>>> df.style.{name}_gradient(axis=0, gmap=df['Temp (c)'], cmap='YlOrRd')
.. figure:: ../../_static/style/{image_prefix}_gmap.png
        When setting the gradient map for a dataframe (i.e. ``axis=None``), we need to
explicitly state ``subset`` to match the ``gmap`` shape
>>> gmap = np.array([[1,2,3], [2,3,4], [3,4,5]])
>>> df.style.{name}_gradient(axis=None, gmap=gmap,
... cmap='YlOrRd', subset=['Temp (c)', 'Rain (mm)', 'Wind (m/s)']
... )
.. figure:: ../../_static/style/{image_prefix}_axNone_gmap.png
"""
if subset is None and gmap is None:
subset = self.data.select_dtypes(include=np.number).columns
self.apply(
_background_gradient,
cmap=cmap,
subset=subset,
axis=axis,
low=low,
high=high,
text_color_threshold=text_color_threshold,
vmin=vmin,
vmax=vmax,
gmap=gmap,
)
return self
@doc(
background_gradient,
name="text",
alt="background",
image_prefix="tg",
axis="{0 or 'index', 1 or 'columns', None}",
text_threshold="This argument is ignored (only used in `background_gradient`).",
)
def text_gradient(
self,
cmap="PuBu",
low: float = 0,
high: float = 0,
axis: Axis | None = 0,
subset: Subset | None = None,
vmin: float | None = None,
vmax: float | None = None,
gmap: Sequence | None = None,
) -> Styler:
if subset is None and gmap is None:
subset = self.data.select_dtypes(include=np.number).columns
return self.apply(
_background_gradient,
cmap=cmap,
subset=subset,
axis=axis,
low=low,
high=high,
vmin=vmin,
vmax=vmax,
gmap=gmap,
text_only=True,
)
def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
"""
Set defined CSS-properties to each ``<td>`` HTML element within the given
subset.
Parameters
----------
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
**kwargs : dict
A dictionary of property, value pairs to be set for each cell.
Returns
-------
self : Styler
Notes
-----
        This is a convenience method which wraps :meth:`Styler.applymap`, calling a
        function that returns the CSS-properties independently of the data.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = "".join(f"{p}: {v};" for p, v in kwargs.items())
return self.applymap(lambda x: values, subset=subset)
@staticmethod
def _bar(
s,
align: str,
colors: list[str],
width: float = 100,
vmin: float | None = None,
vmax: float | None = None,
):
"""
Draw bar chart in dataframe cells.
"""
# Get input value range.
smin = np.nanmin(s.to_numpy()) if vmin is None else vmin
smax = np.nanmax(s.to_numpy()) if vmax is None else vmax
if align == "mid":
smin = min(0, smin)
smax = max(0, smax)
elif align == "zero":
# For "zero" mode, we want the range to be symmetrical around zero.
smax = max(abs(smin), abs(smax))
smin = -smax
# Transform to percent-range of linear-gradient
normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)
zero = -width * smin / (smax - smin + 1e-12)
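        # ``normed`` maps each value onto [0, width]; ``zero`` is where the value 0
        # lands on that scale (used by the 'zero'/'mid' alignments). The 1e-12 term
        # guards against division by zero when all values are equal.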
def css_bar(start: float, end: float, color: str) -> str:
"""
Generate CSS code to draw a bar from start to end.
"""
css = "width: 10em; height: 80%;"
if end > start:
css += "background: linear-gradient(90deg,"
if start > 0:
css += f" transparent {start:.1f}%, {color} {start:.1f}%, "
e = min(end, width)
css += f"{color} {e:.1f}%, transparent {e:.1f}%)"
return css
def css(x):
if pd.isna(x):
return ""
# avoid deprecated indexing `colors[x > zero]`
color = colors[1] if x > zero else colors[0]
if align == "left":
return css_bar(0, x, color)
else:
return css_bar(min(x, zero), max(x, zero), color)
if s.ndim == 1:
return [css(x) for x in normed]
else:
return DataFrame(
[[css(x) for x in row] for row in normed],
index=s.index,
columns=s.columns,
)
def bar(
self,
subset: Subset | None = None,
axis: Axis | None = 0,
color="#d65f5f",
width: float = 100,
align: str = "left",
vmin: float | None = None,
vmax: float | None = None,
) -> Styler:
"""
Draw bar chart in the cell backgrounds.
Parameters
----------
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
width : float, default 100
            A number between 0 and 100. The largest value will cover `width`
percent of the cell's width.
        align : {'left', 'zero', 'mid'}, default 'left'
How to align the bars with the cells.
- 'left' : the min value starts at the left of the cell.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell.
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
.. versionadded:: 0.24.0
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
.. versionadded:: 0.24.0
Returns
-------
self : Styler
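        Examples
        --------
        A minimal illustrative sketch (not part of the original docstring); the
        column name below is hypothetical:
        >>> df = pd.DataFrame({'profit': [1.5, -0.5, 2.0]})
        >>> df.style.bar(subset=['profit'], align='mid',
        ...              color=['#d65f5f', '#5fba7d'])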
"""
if align not in ("left", "zero", "mid"):
raise ValueError("`align` must be one of {'left', 'zero',' mid'}")
if not (is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
raise ValueError(
"`color` must be string or a list-like "
"of length 2: [`color_neg`, `color_pos`] "
"(eg: color=['#d65f5f', '#5fba7d'])"
)
if subset is None:
subset = self.data.select_dtypes(include=np.number).columns
self.apply(
self._bar,
subset=subset,
axis=axis,
align=align,
colors=color,
width=width,
vmin=vmin,
vmax=vmax,
)
return self
def highlight_null(
self,
null_color: str = "red",
subset: Subset | None = None,
props: str | None = None,
) -> Styler:
"""
Highlight missing values with a style.
Parameters
----------
        null_color : str, default 'red'
            Background color to use for highlighting missing values.
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
.. versionadded:: 1.1.0
props : str, default None
            CSS properties to use for highlighting. If ``props`` is given,
            ``null_color`` is not used.
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
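        Examples
        --------
        A small illustrative sketch (not part of the original docstring):
        >>> df = pd.DataFrame({'A': [1.0, None, 3.0]})
        >>> df.style.highlight_null(null_color='lightgrey')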
"""
def f(data: DataFrame, props: str) -> np.ndarray:
return np.where(pd.isna(data).to_numpy(), props, "")
if props is None:
props = f"background-color: {null_color};"
# error: Argument 1 to "apply" of "Styler" has incompatible type
# "Callable[[DataFrame, str], ndarray]"; expected "Callable[..., Styler]"
return self.apply(
f, axis=None, subset=subset, props=props # type: ignore[arg-type]
)
def highlight_max(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
props: str | None = None,
) -> Styler:
"""
Highlight the maximum with a style.
Parameters
----------
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
props : str, default None
CSS properties to use for highlighting. If ``props`` is given, ``color``
is not used.
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
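        Examples
        --------
        An illustrative sketch (not part of the original docstring), highlighting
        the column-wise maxima:
        >>> df = pd.DataFrame({'A': [1, 4, 2], 'B': [9, 3, 6]})
        >>> df.style.highlight_max(color='lightgreen')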
"""
def f(data: FrameOrSeries, props: str) -> np.ndarray:
return np.where(data == np.nanmax(data.to_numpy()), props, "")
if props is None:
props = f"background-color: {color};"
# error: Argument 1 to "apply" of "Styler" has incompatible type
# "Callable[[FrameOrSeries, str], ndarray]"; expected "Callable[..., Styler]"
return self.apply(
f, axis=axis, subset=subset, props=props # type: ignore[arg-type]
)
def highlight_min(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
props: str | None = None,
) -> Styler:
"""
Highlight the minimum with a style.
Parameters
----------
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
props : str, default None
CSS properties to use for highlighting. If ``props`` is given, ``color``
is not used.
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
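        Examples
        --------
        An illustrative sketch (not part of the original docstring), using ``props``
        and highlighting the single smallest value in the frame:
        >>> df = pd.DataFrame({'A': [1, 4, 2], 'B': [9, 3, 6]})
        >>> df.style.highlight_min(axis=None, props='color:red;font-weight:bold;')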
"""
def f(data: FrameOrSeries, props: str) -> np.ndarray:
return np.where(data == np.nanmin(data.to_numpy()), props, "")
if props is None:
props = f"background-color: {color};"
# error: Argument 1 to "apply" of "Styler" has incompatible type
# "Callable[[FrameOrSeries, str], ndarray]"; expected "Callable[..., Styler]"
return self.apply(
f, axis=axis, subset=subset, props=props # type: ignore[arg-type]
)
def highlight_between(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
left: Scalar | Sequence | None = None,
right: Scalar | Sequence | None = None,
inclusive: str = "both",
props: str | None = None,
) -> Styler:
"""
Highlight a defined range with a style.
.. versionadded:: 1.3.0
Parameters
----------
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
If ``left`` or ``right`` given as sequence, axis along which to apply those
boundaries. See examples.
left : scalar or datetime-like, or sequence or array-like, default None
Left bound for defining the range.
right : scalar or datetime-like, or sequence or array-like, default None
Right bound for defining the range.
inclusive : {'both', 'neither', 'left', 'right'}
Identify whether bounds are closed or open.
props : str, default None
CSS properties to use for highlighting. If ``props`` is given, ``color``
is not used.
Returns
-------
self : Styler
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
Notes
-----
If ``left`` is ``None`` only the right bound is applied.
If ``right`` is ``None`` only the left bound is applied. If both are ``None``
all values are highlighted.
``axis`` is only needed if ``left`` or ``right`` are provided as a sequence or
an array-like object for aligning the shapes. If ``left`` and ``right`` are
both scalars then all ``axis`` inputs will give the same result.
This function only works with compatible ``dtypes``. For example a datetime-like
region can only use equivalent datetime-like ``left`` and ``right`` arguments.
Use ``subset`` to control regions which have multiple ``dtypes``.
Examples
--------
Basic usage
>>> df = pd.DataFrame({
... 'One': [1.2, 1.6, 1.5],
... 'Two': [2.9, 2.1, 2.5],
... 'Three': [3.1, 3.2, 3.8],
... })
>>> df.style.highlight_between(left=2.1, right=2.9)
.. figure:: ../../_static/style/hbetw_basic.png
        Using a range input sequence along an ``axis``, in this case setting a ``left``
and ``right`` for each column individually
>>> df.style.highlight_between(left=[1.4, 2.4, 3.4], right=[1.6, 2.6, 3.6],
... axis=1, color="#fffd75")
.. figure:: ../../_static/style/hbetw_seq.png
Using ``axis=None`` and providing the ``left`` argument as an array that
matches the input DataFrame, with a constant ``right``
>>> df.style.highlight_between(left=[[2,2,3],[2,2,3],[3,3,3]], right=3.5,
... axis=None, color="#fffd75")
.. figure:: ../../_static/style/hbetw_axNone.png
Using ``props`` instead of default background coloring
>>> df.style.highlight_between(left=1.5, right=3.5,
... props='font-weight:bold;color:#e83e8c')
.. figure:: ../../_static/style/hbetw_props.png
"""
if props is None:
props = f"background-color: {color};"
return self.apply(
_highlight_between, # type: ignore[arg-type]
axis=axis,
subset=subset,
props=props,
left=left,
right=right,
inclusive=inclusive,
)
def highlight_quantile(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
q_left: float = 0.0,
q_right: float = 1.0,
interpolation: str = "linear",
inclusive: str = "both",
props: str | None = None,
) -> Styler:
"""
Highlight values defined by a quantile with a style.
.. versionadded:: 1.3.0
Parameters
----------
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
color : str, default 'yellow'
Background color to use for highlighting
axis : {0 or 'index', 1 or 'columns', None}, default 0
Axis along which to determine and highlight quantiles. If ``None`` quantiles
are measured over the entire DataFrame. See examples.
q_left : float, default 0
Left bound, in [0, q_right), for the target quantile range.
q_right : float, default 1
Right bound, in (q_left, 1], for the target quantile range.
        interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for
quantile estimation.
inclusive : {'both', 'neither', 'left', 'right'}
Identify whether quantile bounds are closed or open.
props : str, default None
CSS properties to use for highlighting. If ``props`` is given, ``color``
is not used.
Returns
-------
self : Styler
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Notes
-----
This function does not work with ``str`` dtypes.
Examples
--------
Using ``axis=None`` and apply a quantile to all collective data
>>> df = pd.DataFrame(np.arange(10).reshape(2,5) + 1)
>>> df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75")
.. figure:: ../../_static/style/hq_axNone.png
        Or highlight quantiles row-wise or column-wise, in this case row-wise
>>> df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75")
.. figure:: ../../_static/style/hq_ax1.png
Use ``props`` instead of default background coloring
>>> df.style.highlight_quantile(axis=None, q_left=0.2, q_right=0.8,
... props='font-weight:bold;color:#e83e8c')
.. figure:: ../../_static/style/hq_props.png
"""
subset_ = slice(None) if subset is None else subset
subset_ = non_reducing_slice(subset_)
data = self.data.loc[subset_]
# after quantile is found along axis, e.g. along rows,
# applying the calculated quantile to alternate axis, e.g. to each column
kwargs = {"q": [q_left, q_right], "interpolation": interpolation}
if axis in [0, "index"]:
q = data.quantile(axis=axis, numeric_only=False, **kwargs)
axis_apply: int | None = 1
elif axis in [1, "columns"]:
q = data.quantile(axis=axis, numeric_only=False, **kwargs)
axis_apply = 0
else: # axis is None
q = Series(data.to_numpy().ravel()).quantile(**kwargs)
axis_apply = None
if props is None:
props = f"background-color: {color};"
return self.apply(
_highlight_between, # type: ignore[arg-type]
axis=axis_apply,
subset=subset,
props=props,
left=q.iloc[0],
right=q.iloc[1],
inclusive=inclusive,
)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``.
Uses a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates.
name : str
Name of your custom template to use for rendering.
Returns
-------
MyStyler : subclass of Styler
Has the correct ``env`` and ``template`` class attributes set.
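        Examples
        --------
        A hypothetical sketch; the search path and template name below are
        placeholders, not files shipped with pandas:
        >>> MyStyler = Styler.from_custom_template("path/to/templates",
        ...                                        "my_template.tpl")
        >>> MyStyler(df)  # doctest: +SKIP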
"""
loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])
# mypy doesn't like dynamically-defined classes
# error: Variable "cls" is not valid as a type
# error: Invalid base class "cls"
class MyStyler(cls): # type:ignore[valid-type,misc]
env = jinja2.Environment(loader=loader)
template_html = env.get_template(name)
return MyStyler
def pipe(self, func: Callable, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
.. versionadded:: 0.24.0
Parameters
----------
func : function
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
*args : optional
Arguments passed to `func`.
**kwargs : optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object :
The value returned by ``func``.
See Also
--------
DataFrame.pipe : Analogous method for DataFrame.
Styler.apply : Apply a CSS-styling function column-wise, row-wise, or
table-wise.
Notes
-----
Like :meth:`DataFrame.pipe`, this method can simplify the
application of several user-defined functions to a styler. Instead
of writing:
.. code-block:: python
f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
users can write:
.. code-block:: python
(df.style.set_precision(3)
.pipe(g, arg1=a)
.pipe(f, arg2=b, arg3=c))
In particular, this allows users to define functions that take a
styler object, along with other parameters, and return the styler after
making styling changes (such as calling :meth:`Styler.apply` or
:meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
style "transformations" can be interleaved with calls to the built-in
Styler interface.
Examples
--------
>>> def format_conversion(styler):
... return (styler.set_properties(**{'text-align': 'right'})
... .format({'conversion': '{:.1%}'}))
The user-defined ``format_conversion`` function above can be called
within a sequence of other style modifications:
>>> df = pd.DataFrame({'trial': list(range(5)),
... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
>>> (df.style
... .highlight_min(subset=['conversion'], color='yellow')
... .pipe(format_conversion)
... .set_caption("Results with minimum conversion highlighted."))
"""
return com.pipe(self, func, *args, **kwargs)
def _validate_apply_axis_arg(
arg: FrameOrSeries | Sequence | np.ndarray,
arg_name: str,
dtype: Any | None,
data: FrameOrSeries,
) -> np.ndarray:
"""
    For the apply-type methods, ``axis=None`` creates ``data`` as a DataFrame, and for
    ``axis=[1,0]`` it creates a Series. Where ``arg`` is expected to be an operand in
    some operation with ``data``, we must make sure the two have compatible shapes,
    or raise.
Parameters
----------
arg : sequence, Series or DataFrame
the user input arg
arg_name : string
name of the arg for use in error messages
dtype : numpy dtype, optional
forced numpy dtype if given
data : Series or DataFrame
        underlying subset of Styler data on which operations are performed
Returns
-------
ndarray
"""
dtype = {"dtype": dtype} if dtype else {}
# raise if input is wrong for axis:
if isinstance(arg, Series) and isinstance(data, DataFrame):
raise ValueError(
f"'{arg_name}' is a Series but underlying data for operations "
f"is a DataFrame since 'axis=None'"
)
elif isinstance(arg, DataFrame) and isinstance(data, Series):
raise ValueError(
f"'{arg_name}' is a DataFrame but underlying data for "
f"operations is a Series with 'axis in [0,1]'"
)
elif isinstance(arg, (Series, DataFrame)): # align indx / cols to data
arg = arg.reindex_like(data, method=None).to_numpy(**dtype)
else:
arg = np.asarray(arg, **dtype)
assert isinstance(arg, np.ndarray) # mypy requirement
if arg.shape != data.shape: # check valid input
raise ValueError(
f"supplied '{arg_name}' is not correct shape for data over "
f"selected 'axis': got {arg.shape}, "
f"expected {data.shape}"
)
return arg
def _background_gradient(
data,
cmap="PuBu",
low: float = 0,
high: float = 0,
text_color_threshold: float = 0.408,
vmin: float | None = None,
vmax: float | None = None,
gmap: Sequence | np.ndarray | FrameOrSeries | None = None,
text_only: bool = False,
):
"""
Color background in a range according to the data or a gradient map
"""
    if gmap is None:  # the data is used as the gmap
gmap = data.to_numpy(dtype=float)
else: # else validate gmap against the underlying data
gmap = _validate_apply_axis_arg(gmap, "gmap", float, data)
with _mpl(Styler.background_gradient) as (plt, colors):
smin = np.nanmin(gmap) if vmin is None else vmin
smax = np.nanmax(gmap) if vmax is None else vmax
rng = smax - smin
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
rgbas = plt.cm.get_cmap(cmap)(norm(gmap))
def relative_luminance(rgba) -> float:
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
            rgba : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def css(rgba, text_only) -> str:
if not text_only:
dark = relative_luminance(rgba) < text_color_threshold
text_color = "#f1f1f1" if dark else "#000000"
return f"background-color: {colors.rgb2hex(rgba)};color: {text_color};"
else:
return f"color: {colors.rgb2hex(rgba)};"
if data.ndim == 1:
return [css(rgba, text_only) for rgba in rgbas]
else:
return DataFrame(
[[css(rgba, text_only) for rgba in row] for row in rgbas],
index=data.index,
columns=data.columns,
)
def _highlight_between(
data: FrameOrSeries,
props: str,
left: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,
right: Scalar | Sequence | np.ndarray | FrameOrSeries | None = None,
inclusive: bool | str = True,
) -> np.ndarray:
"""
Return an array of css props based on condition of data values within given range.
"""
if np.iterable(left) and not isinstance(left, str):
left = _validate_apply_axis_arg(
left, "left", None, data # type: ignore[arg-type]
)
if np.iterable(right) and not isinstance(right, str):
right = _validate_apply_axis_arg(
right, "right", None, data # type: ignore[arg-type]
)
# get ops with correct boundary attribution
if inclusive == "both":
ops = (operator.ge, operator.le)
elif inclusive == "neither":
ops = (operator.gt, operator.lt)
elif inclusive == "left":
ops = (operator.ge, operator.lt)
elif inclusive == "right":
ops = (operator.gt, operator.le)
else:
raise ValueError(
f"'inclusive' values can be 'both', 'left', 'right', or 'neither' "
f"got {inclusive}"
)
g_left = (
ops[0](data, left)
if left is not None
else np.full(data.shape, True, dtype=bool)
)
l_right = (
ops[1](data, right)
if right is not None
else np.full(data.shape, True, dtype=bool)
)
return np.where(g_left & l_right, props, "")
| bsd-3-clause |
djevans071/Rebalancing-Citibike | concat_data.py | 1 | 4622 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 2 21:04:48 2017
@author: psamtik071
"""
from workflow.data import *
import pandas as pd
import os
import pdb
import holidays
year = 2015
month = 1
#create flux data table from trips dataframe and rebal dataframe
# create 'start/stop_hour' and 'start/stop_date' features
def split_off_times(df):
df['start_hour'] = df.start_time.dt.hour
df['stop_hour'] = df.stop_time.dt.hour
df[['start_date', 'stop_date']] = df[['start_time', 'stop_time']].apply(lambda x: x.dt.floor('d'))
return df
def create_fluxes(df, id_key, date_key, hour_key, fl_key):
    # id_key, date_key and hour_key are the start_/stop_ variants of id, date and hour
# start is associated with an fl_key = 'flux_out',
# and stop is associated with fl_key = 'flux_in'
use_cols = [id_key, date_key, hour_key, 'duration']
flux = df.groupby([id_key, date_key, hour_key]).count()
flux = flux.reset_index()[use_cols]
col_dict = {'duration': fl_key,
date_key: 'date', hour_key: 'hour',
id_key: 'id'}
return flux.rename(columns = col_dict)
def transform_times(df):
# calculate approximate pickup and drop-off times for rebalancing trips
t_start = df.start_time
t_end = df.stop_time
time_diff = t_start - t_end
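    # time_diff is the idle window between the recorded stop_time and the next
    # recorded start_time; the rebalancing move is assumed to occupy the middle
    # third of that window (pickup at 1/3, drop-off at 2/3 of the gap)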
r_start = t_end + time_diff/3.
r_end = t_end + time_diff*(2/3.)
df['start_time'] = r_start
df['stop_time'] = r_end
return df.rename(columns = {'start_id':'stop_id', 'stop_id':'start_id'})
def merge_fluxes(df1, df2):
# concatenate fluxes or any other dataset with the keys
    # 'id', 'date' and 'hour'
return pd.merge(df1, df2, how='outer',
on = ['id', 'date', 'hour']).fillna(0)
def initial_features(trips, rebals):
# create "hour" and "date" features
trips = split_off_times(trips)
# create fluxes from normal trips and merge
bikes_out = create_fluxes(trips, 'start_id', 'start_date','start_hour', 'bikes_out')
bikes_in = create_fluxes(trips, 'stop_id', 'stop_date','stop_hour', 'bikes_in')
merged = merge_fluxes(bikes_out, bikes_in)
# create weekday column (Monday = 0, Sunday = 6)
# and create other time-specific columns
merged['dayofweek'] = merged.date.dt.weekday
merged['month'] = merged.date.dt.month
merged['year'] = merged.date.dt.year
merged['is_weekday'] = merged.dayofweek.isin(range(5))
merged['is_holiday'] = merged.date.isin(holidays.UnitedStates())
# ----------- CALCULATING FLUXES FOR REBALANCING TRIPS ----------------
rebals = transform_times(rebals)
rebals = split_off_times(rebals)
#create fluxes from rebalanced trips
rflux_out = create_fluxes(rebals, 'start_id', 'start_date', 'start_hour', 'rbikes_out')
rflux_in = create_fluxes(rebals, 'stop_id', 'stop_date', 'stop_hour', 'rbikes_in')
rmerged = merge_fluxes(rflux_out, rflux_in)
rmerged['rebal_net_flux'] = rmerged.rbikes_in - rmerged.rbikes_out
#rmerged = rmerged.drop(['rbikes_in', 'rbikes_out'], axis=1)
features = merge_fluxes(merged, rmerged)
# ------ add more features ---------------------------
# add station availability data
avail_db = station_data(year,month)
features = merge_fluxes(features, avail_db)
features = features[features.year != 0.]
#pdb.set_trace()
return features
# --------- execute script to write features data to disk------------------
# check_cols = ['start_time', 'stop_time', 'start_id', 'stop_id', 'duration']
# trips = trip_data(year,month)[check_cols]
# rebals = rebal_data(year, month)[check_cols]
#
# out_filename = 'features_data/{}{:02}-features-data.csv'.format(year,month)
# features = initial_features(trips, rebals)
# features.to_csv(out_filename)
for year in xrange(2015,2018):
for month in xrange(1,13):
out_filename = 'features_data/{}{:02}-features-data.csv'.format(year,month)
print "extracting features for {}-{:02}".format(year, month)
if os.path.exists(out_filename):
print "{} already exists".format(out_filename)
pass
else:
try:
check_cols = ['start_time', 'stop_time',
'start_id', 'stop_id', 'duration']
trips = trip_data(year,month)[check_cols]
rebals = rebal_data(year, month)[check_cols]
features = initial_features(trips, rebals)
features.to_csv(out_filename, index = None)
except IOError:
print '\tInput data does not exist for {}-{:02}'.format(year, month)
| mit |
appapantula/Data-Science-45min-Intros | support-vector-machines-101/kernel-examples.py | 26 | 2054 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import json
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
import numpy as np
from sklearn.svm import SVC
# adapted from:
# http://scikit-learn.org/stable/auto_examples/svm/plot_svm_kernels.html
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf', 'sigmoid'):
#clf = SVC(kernel=kernel)
clf = SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(8, 6))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
facecolors='none', zorder=10, s=300)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired, s=100)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
#plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.pcolormesh(XX, YY, Z > 0, alpha=0.1)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
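    # the solid contour (level 0) is the decision boundary; the dashed contours
    # at decision-function values -0.5 and +0.5 sketch a band around it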
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.title('{}'.format(kernel))
#plt.xticks(())
#plt.yticks(())
fignum = fignum + 1
plt.show()
| unlicense |
Kamp9/scipy | scipy/stats/stats.py | 18 | 169352 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import find_repeats, linregress, theilslopes
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be either 'propagate', 'raise', or "
"'ignore'")
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
        # If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int or None, optional
Axis along which the mean is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
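    # zero out the NaNs, take the mean over the full length, then rescale by the
    # fraction of non-NaN entries; this equals the average of the valid values only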
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
    Returns
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the median is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean, numpy.nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'): # numpy 1.9 faster for some cases
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
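    Examples
    --------
    A brief illustrative sketch (not part of the original docstring); the
    geometric mean of 1, 4 and 16 is the cube root of 64:
    >>> from scipy import stats
    >>> stats.gmean([1, 4, 16])  # doctest: +SKIP
    4.0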
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
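    Examples
    --------
    A brief illustrative sketch (not part of the original docstring); here
    3 / (1/1 + 1/4 + 1/4) = 2:
    >>> from scipy import stats
    >>> stats.hmean([1, 4, 4])
    2.0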
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
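Examples
--------
A minimal illustrative sketch (the sample values are arbitrary; they are
chosen so the central moments come out exact):
>>> from scipy import stats
>>> stats.moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> stats.moment([1, 2, 3, 4, 5], moment=2)
2.0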
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
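Examples
--------
A minimal sketch of typical usage (an arbitrary two-point sample, chosen
so the result is exact):
>>> from scipy import stats
>>> stats.variation([1, 3])  # std([1, 3]) / mean([1, 3]) = 1.0 / 2.0
0.5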
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
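Examples
--------
A minimal sketch of typical usage (an arbitrary symmetric sample, for
which the skewness is exactly zero):
>>> from scipy import stats
>>> stats.skew([1, 2, 3, 4, 5])
0.0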
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
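Examples
--------
A minimal sketch of typical usage (a two-point sample, chosen so both
definitions give exact results):
>>> from scipy import stats
>>> stats.kurtosis([0, 1], fisher=False)  # Pearson definition
1.0
>>> stats.kurtosis([0, 1])  # Fisher definition subtracts 3
-2.0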
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
# Return namedtuple for clarity
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if contains_nan and nan_policy == 'propagate':
res = np.zeros(6) * np.nan
return DescribeResult(*res)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
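Examples
--------
A minimal sketch of typical usage (an arbitrary sample of eight values;
the exact statistic and p-value depend on the data, so only the p-value
range is checked):
>>> import numpy as np
>>> from scipy import stats
>>> stat, p = stats.skewtest(np.arange(8))
>>> 0.0 <= p <= 1.0
True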
"""
a, axis = _chk_asarray(a, axis)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if contains_nan and nan_policy == 'propagate':
return SkewtestResult(np.nan, np.nan)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
Valid only for n>20. The Z-score is set to 0 for bad entries.
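Examples
--------
A minimal sketch of typical usage (an arbitrary sample of 25 values; the
exact statistic and p-value depend on the data, so only the p-value range
is checked):
>>> import numpy as np
>>> from scipy import stats
>>> stat, p = stats.kurtosistest(np.arange(25.))
>>> 0.0 <= p <= 1.0
True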
"""
a, axis = _chk_asarray(a, axis)
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
if contains_nan and nan_policy == 'propagate':
return KurtosistestResult(np.nan, np.nan)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E) / np.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size," Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
departures from normality," Biometrika, 60, 613-622
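Examples
--------
A minimal sketch of typical usage (the seed and sample size are arbitrary;
exact outputs are not shown because they depend on the generated sample):
>>> import numpy as np
>>> from scipy import stats
>>> rng = np.random.RandomState(seed=12345)
>>> stat, p = stats.normaltest(rng.normal(size=1000))
>>> 0.0 <= p <= 1.0
True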
"""
a, axis = _chk_asarray(a, axis)
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
if contains_nan and nan_policy == 'propagate':
return NormaltestResult(np.nan, np.nan)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides. And it's significantly faster.
Therefore it's recommended to use `numpy.percentile` for users that have
numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
bins[x] <= range_x < bins[x+1] where x = 0,N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
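Examples
--------
A minimal sketch of typical usage (arbitrary sample values; only the
scalar fields of the result are shown to keep the output compact):
>>> from scipy import stats
>>> res = stats.histogram([1, 1, 2, 2, 3, 4], numbins=3)
>>> res.binsize
1.5
>>> res.extrapoints
0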
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit',
'binsize', 'extrapoints'))
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit',
'binsize', 'extrapoints'))
return CumfreqResult(cumhist, l, b, e)
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit',
'binsize', 'extrapoints'))
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
# If the arrays are not all the same shape, calling np.array(arrays)
# creates a 1-D array with dtype `object` in numpy 1.6+. In numpy
# 1.5.x, it raises an exception. To work around this, we explicitly
# set the dtype to `object` when the arrays are not all the same shape.
if len(arrays) < 2 or all(x.shape == arrays[0].shape for x in arrays[1:]):
dt = None
else:
dt = object
return np.array(arrays, dtype=dt)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different from the default (0) used by other
ddof-containing routines, such as np.std and stats.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the sample
mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of input
array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954,
... 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``)
to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to zero
mean and unit variance, where mean and variance are calculated from the
comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `a`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower',
'upper'))
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
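Examples
--------
A minimal sketch of typical usage (an arbitrary 1-D input; only the shape
of the trimmed result is shown):
>>> import numpy as np
>>> from scipy import stats
>>> stats.trim1(np.arange(10), 0.2).shape  # 20% cut from the right tail
(8,)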
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exist in numpy 1.8.0 and higher,
# in those cases we use np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, J.H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.1210194716424473, pvalue=0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
(Pearson's correlation coefficient,
2-tailed p-value)
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
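Examples
--------
A minimal sketch of typical usage (the second input is an exact linear
function of the first, so the coefficient is exactly 1 and the p-value 0):
>>> from scipy import stats
>>> r, p = stats.pearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
>>> r
1.0
>>> p
0.0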
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). The correlation matrix is square,
        with length equal to the total number of variables (columns or rows)
        in a and b combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated. Has the same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
def pointbiserialr(x, y):
"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
    It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
        r_{pb} = \frac{\overline{Y_{1}} -
        \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
    groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
       no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
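# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  It evaluates the
# shortcut formula quoted in the Notes of `pointbiserialr` directly and checks
# it against the function itself; the data are made up.
def _demo_pointbiserialr_formula():
    # Evaluate r_pb = (Y1_mean - Y0_mean) / s_y * sqrt(N0*N1 / (N*(N-1)))
    # on a tiny example and compare with pointbiserialr (both are ~0.866).
    x = np.array([0, 0, 0, 1, 1, 1, 1])
    y = np.arange(7, dtype=float)
    y1, y0 = y[x == 1].mean(), y[x == 0].mean()
    n1, n0, n = (x == 1).sum(), (x == 0).sum(), x.size
    r_formula = (y1 - y0) / y.std(ddof=1) * np.sqrt(n1 * n0 / (n * (n - 1.0)))
    return r_formula, pointbiserialr(x, y).correlation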
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
    # check both x and y; `or` on the two result tuples would always return
    # the first (non-empty) tuple, so combine the flags explicitly
    contains_nan_x, nan_policy = _contains_nan(x, nan_policy)
    contains_nan_y, nan_policy = _contains_nan(y, nan_policy)
    contains_nan = contains_nan_x or contains_nan_y
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
n = np.int64(len(x))
temp = list(range(n)) # support structure used by mergesort
# this closure recursively sorts sections of perm[] by comparing
# elements of y[perm[]] using temp[] as support
# returns the number of swaps required by an equivalent bubble sort
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
# merging
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
# initial sort on values of x and, if tied, on values of y
if initial_lexsort:
# sort implemented as mergesort, worst case: O(n log(n))
perm = np.lexsort((y, x))
else:
# sort implemented as quicksort, 30% faster but with worst case: O(n^2)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
# compute joint ties
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
# compute ties in x
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
# count exchanges
exchanges = mergesort(0, n)
# compute ties in y after mergesort with counting
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
# Special case for all ties in both ranks
return KendalltauResult(np.nan, np.nan)
# Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
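# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  It computes the tau-b
# statistic exactly as stated in the Notes of `kendalltau`,
#     tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U)),
# by brute force (O(n**2)), and cross-checks it against kendalltau() on the
# docstring example (both give approximately -0.4714).
def _demo_kendalltau_naive():
    x = np.asarray([12, 2, 1, 12, 2])
    y = np.asarray([1, 4, 7, 1, 0])
    P = Q = T = U = 0
    for i in range(len(x)):
        for j in range(i + 1, len(x)):
            dx, dy = x[i] - x[j], y[i] - y[j]
            if dx == 0 and dy == 0:
                continue          # tied in both rankings: counts in neither T nor U
            elif dx == 0:
                T += 1            # tied only in x
            elif dy == 0:
                U += 1            # tied only in y
            elif dx * dy > 0:
                P += 1            # concordant pair
            else:
                Q += 1            # discordant pair
    tau_naive = (P - Q) / np.sqrt((P + Q + T) * (P + Q + U))
    return tau_naive, kendalltau(x, y).correlation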
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis, if array_like than it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
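# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  The two private
# denominator helpers above have no docstrings; this sketch contrasts the
# pooled (equal-variance) denominator with the Welch-Satterthwaite denominator
# for two made-up groups whose variances differ.
def _demo_ttest_denoms():
    v1, n1 = 4.0, 30     # variance and size of group 1 (made-up numbers)
    v2, n2 = 25.0, 10    # variance and size of group 2
    pooled = _equal_var_ttest_denom(v1, n1, v2, n2)    # df = n1 + n2 - 2 = 38
    welch = _unequal_var_ttest_denom(v1, n1, v2, n2)   # smaller, non-integer df
    return pooled, welch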
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
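# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  It shows that the
# summary-statistics interface reproduces ttest_ind (defined further below in
# this module) when the means, ddof=1 standard deviations and sample sizes are
# taken from the same raw data; the data themselves are made up.
def _demo_ttest_ind_from_stats():
    a = np.array([19.8, 20.4, 19.6, 17.8, 18.5, 18.9, 18.3, 18.9, 19.5, 22.0])
    b = np.array([28.2, 26.6, 20.1, 23.3, 25.2, 22.1, 17.7, 27.6, 20.6, 13.7])
    from_stats = ttest_ind_from_stats(a.mean(), a.std(ddof=1), a.size,
                                      b.mean(), b.std(ddof=1), b.size)
    from_data = ttest_ind(a, b)
    return from_stats, from_data   # the two results agree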
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
    # check both a and b; `or` on the two result tuples would always return
    # the first (non-empty) tuple, so combine the flags explicitly
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
Examples for the use are scores of the same set of student in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
    # check both a and b; `or` on the two result tuples would always return
    # the first (non-empty) tuple, so combine the flags explicitly
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_rel(a, b, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
        return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D,
distributions.kstwobign.sf(D * np.sqrt(N)))
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic',
'pvalue'))
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi square test.
The chi square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
except:
prob = 1.0
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
return Ks_2sampResult(d, prob)
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
Returns
-------
statistic : float
The Mann-Whitney statistics.
pvalue : float
        One-sided p-value assuming an asymptotic normal distribution.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
The reported p-value is for a one-sided hypothesis, to get the two-sided
p-value multiply the returned p-value by 2.
"""
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
fact2 = 1
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative == 'less':
z = u1 - meanrank
elif alternative == 'greater':
z = u2 - meanrank
elif alternative == 'two-sided':
bigu = max(u1, u2)
z = np.abs(bigu - meanrank)
fact2 = 2.
else:
raise ValueError("alternative should be 'less', 'greater'"
"or 'two-sided'")
z = z / sd
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
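# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  It simply shows the
# calling convention of mannwhitneyu on two made-up samples with an obvious
# location shift; the samples are smaller than the n > 20 guideline given in
# the docstring and serve only as an illustration.
def _demo_mannwhitneyu():
    x = [1, 4, 2, 5, 3, 6, 4, 5, 2, 3]
    y = [8, 6, 9, 7, 10, 9, 11, 8, 7, 10]
    return mannwhitneyu(x, y)      # default is the two-sided alternative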
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
return RanksumsResult(z, prob)
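# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  It returns the
# large-sample z statistic and two-sided p-value of the Wilcoxon rank-sum test
# for two small, made-up samples without ties.
def _demo_ranksums():
    x = [1.1, 2.3, 1.9, 3.2, 2.8, 2.2]
    y = [3.9, 4.5, 3.3, 4.1, 5.0, 4.7]
    return ranksums(x, y)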
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
for a in args:
a = ma.masked_invalid(a)
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
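# Editor's note: the helper below is a hypothetical illustration added during
# editing and is not part of the original scipy source.  It applies the
# Friedman test to three repeated (made-up) measurements on the same six
# units, i.e. each argument holds one measurement occasion.
def _demo_friedmanchisquare():
    before = [72, 69, 83, 58, 75, 66]
    after_1week = [70, 68, 80, 55, 72, 64]
    after_4weeks = [68, 66, 78, 54, 70, 62]
    return friedmanchisquare(before, after_1week, after_4weeks)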
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
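    Examples
    --------
    A small sanity check (for ``a = b = 1`` the regularized incomplete beta
    function reduces to the identity, so the result equals ``x``):

    >>> betai(1, 1, 0.5)  # doctest: +SKIP
    0.5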
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the numerator (the Restricted model).
    dfden : int
        Degrees of freedom of the denominator (the Full model).
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
    sorted : ndarray
        A sorted copy of the input array.
    argsort : ndarray of int
        The indices that sort the original array.
    """
it = np.argsort(a)
as_ = a[it]
return as_, it
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/numpy/doc/creation.py | 54 | 5503 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)])  # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
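For example, an illustrative doctest (output shown in the same condensed
form used above)::

 >>> np.ones((2, 3))
 array([[ 1., 1., 1.], [ 1., 1., 1.]])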
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
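For instance, evaluating a simple function of two variables on that grid
(an illustrative doctest)::

 >>> i, j = np.indices((3, 3))
 >>> i + j
 array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])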
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
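As a minimal sketch, assuming a comma-delimited file named ``data.csv``
exists (the filename here is purely hypothetical)::

 >>> np.loadtxt('data.csv', delimiter=',')                 # doctest: +SKIP
 >>> np.genfromtxt('data.csv', delimiter=',', names=True)  # doctest: +SKIP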
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
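A minimal sketch of the fromfile()/.tofile() route (``data.bin`` is a
hypothetical file; the dtype passed to fromfile must match what was written,
since no metadata is stored in the file)::

 >>> a = np.arange(10, dtype=np.float64)
 >>> a.tofile('data.bin')                       # doctest: +SKIP
 >>> np.fromfile('data.bin', dtype=np.float64)  # doctest: +SKIP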
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
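For example (illustrative doctests; the random draw is skipped because its
values depend on the global seed)::

 >>> np.random.random((2, 2))  # doctest: +SKIP
 >>> np.diag([1, 2, 3])
 array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])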
"""
from __future__ import division, absolute_import, print_function
| gpl-2.0 |
ZENGXH/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale
512 x 512 image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
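# (np.choose(labels, values) picks values[label] for every pixel, i.e. each
# pixel is replaced by the centroid of the k-means cluster it belongs to)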
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
Anjum48/pymc3 | pymc3/examples/stochastic_volatility.py | 13 | 4096 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from matplotlib.pylab import *
import numpy as np
from pymc3 import *
from pymc3.distributions.timeseries import *
from scipy.sparse import csc_matrix
from scipy import optimize
# <markdowncell>
# Asset prices have time-varying volatility (variance of day over day `returns`). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
#
# $$ \sigma \sim Exponential(50) $$
#
# $$ \nu \sim Exponential(.1) $$
#
# $$ s_i \sim Normal(s_{i-1}, \sigma^{-2}) $$
#
# $$ log(\frac{y_i}{y_{i-1}}) \sim t(\nu, 0, exp(-2 s_i)) $$
#
# Here, $y$ is the daily return series and $s$ is the latent log
# volatility process.
# <markdowncell>
# ## Build Model
# <markdowncell>
# First we load some daily returns of the S&P 500.
# <codecell>
n = 400
returns = np.genfromtxt(get_data_file('pymc3.examples', "data/SP500.csv"))[-n:]
returns[:5]
# <markdowncell>
# Specifying the model in pymc3 mirrors its statistical specification.
#
# However, it is easier to sample the scale of the log volatility process innovations, $\sigma$, on a log scale, so we create it using `TransformedVar` and use `logtransform`. `TransformedVar` creates one variable in the transformed space and one in the normal space. The one in the transformed space (here $\text{log}(\sigma) $) is the one over which sampling will occur, and the one in the normal space is the one to use throughout the rest of the model.
#
# It takes a variable name, a distribution and a transformation to use.
# <codecell>
model = Model()
with model:
    sigma = Exponential('sigma', 1. / .02, testval=.1)
nu = Exponential('nu', 1. / 10)
s = GaussianRandomWalk('s', sigma ** -2, shape=n)
r = T('r', nu, lam=exp(-2 * s), observed=returns)
# <markdowncell>
# ## Fit Model
#
# To get a decent scaling matrix for the Hamiltonian sampler, we find the Hessian at a point. The method `Model.d2logpc` gives us a `Theano` compiled function that returns the matrix of 2nd derivatives.
#
# However, the 2nd derivatives for the degrees of freedom parameter, `nu`, are negative and thus not very informative and make the matrix non-positive definite, so we replace that entry with a reasonable guess at the scale. The interactions between `log_sigma`/`nu` and `s` are also not very useful, so we set them to zero.
# <markdowncell>
# For this model, the full maximum a posteriori (MAP) point is degenerate and has infinite density. However, if we fix `log_sigma` and `nu` it is no longer degenerate, so we find the MAP with respect to the volatility process, 's', keeping `log_sigma` and `nu` constant at their default values.
#
# We use L-BFGS because it is more efficient for high dimensional
# functions (`s` has n elements).
# <markdowncell>
# We do a short initial run to get near the right area, then start again
# using a new Hessian at the new starting point to get faster sampling due
# to better scaling. We do a short run since this is an interactive
# example.
# <codecell>
def run(n=2000):
if n == "short":
n = 50
with model:
start = find_MAP(vars=[s], fmin=optimize.fmin_l_bfgs_b)
step = NUTS(model.vars, scaling=start, gamma=.25)
trace = sample(5, step, start)
# Start next run at the last sampled position.
start2 = trace.point(-1)
step2 = NUTS(model.vars, scaling=start2, gamma=.25)
trace = sample(n, step2, trace=trace)
# <codecell>
# figsize(12,6)
title(str(s))
plot(trace[s][::10].T, 'b', alpha=.03)
xlabel('time')
ylabel('log volatility')
# figsize(12,6)
traceplot(trace, model.vars[:-1])
if __name__ == '__main__':
run()
# <markdowncell>
# ## References
#
# 1. Hoffman & Gelman. (2011). [The No-U-Turn Sampler: Adaptively Setting
# Path Lengths in Hamiltonian Monte
# Carlo](http://arxiv.org/abs/1111.4246).
| apache-2.0 |
rohanp/scikit-learn | sklearn/decomposition/pca.py | 20 | 23579 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in gaussian noise of shape (n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
explained_variance_.
explained_variance_ : array, [n_components]
The amount of variance explained by each of the selected components.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=1)`.
n_components_ : int
The estimated number of components. Relevant when `n_components` is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_) # doctest: +ELLIPSIS
[ 6.6162... 0.05038...]
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space using `n_components_`.
Returns an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components. X represents
data from the projection on to the principal components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 2 by default.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
jorik041/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curves.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/tests/indexes/timedeltas/test_construction.py | 3 | 3568 | import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, to_timedelta
class TestTimedeltaIndex(object):
def test_construction_base_constructor(self):
arr = [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timedelta('1 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
def test_constructor(self):
expected = TimedeltaIndex(['1 days', '1 days 00:00:05', '2 days',
'2 days 00:00:02', '0 days 00:00:03'])
result = TimedeltaIndex(['1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
tm.assert_index_equal(result, expected)
# unicode
result = TimedeltaIndex([u'1 days', '1 days, 00:00:05', np.timedelta64(
2, 'D'), timedelta(days=2, seconds=2), pd.offsets.Second(3)])
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01',
'0 days 00:00:02'])
tm.assert_index_equal(TimedeltaIndex(range(3), unit='s'), expected)
expected = TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:05',
'0 days 00:00:09'])
tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit='s'), expected)
expected = TimedeltaIndex(
['0 days 00:00:00.400', '0 days 00:00:00.450',
'0 days 00:00:01.200'])
tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'),
expected)
def test_constructor_coverage(self):
rng = timedelta_range('1 days', periods=10.5)
exp = timedelta_range('1 days', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
TimedeltaIndex(start='1 days', periods='foo', freq='D')
pytest.raises(ValueError, TimedeltaIndex, start='1 days',
end='10 days')
pytest.raises(ValueError, TimedeltaIndex, '1 days')
# generator expression
gen = (timedelta(i) for i in range(10))
result = TimedeltaIndex(gen)
expected = TimedeltaIndex([timedelta(i) for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['1 days', '2 days', '3 days'])
result = TimedeltaIndex(strings)
expected = to_timedelta([1, 2, 3], unit='d')
tm.assert_index_equal(result, expected)
from_ints = TimedeltaIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming freq
pytest.raises(ValueError, TimedeltaIndex,
['1 days', '2 days', '4 days'], freq='D')
pytest.raises(ValueError, TimedeltaIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = TimedeltaIndex(start='1 days', periods=1, freq='D', name='TEST')
assert idx.name == 'TEST'
# GH10025
idx2 = TimedeltaIndex(idx, name='something else')
assert idx2.name == 'something else'
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/tsa/tests/test_x13.py | 27 | 1877 | from nose import SkipTest
from numpy.testing import assert_
from statsmodels.tsa.base.datetools import dates_from_range
from statsmodels.tsa.x13 import _find_x12, x13_arima_select_order
x13path = _find_x12()
if x13path is False:
_have_x13 = False
else:
_have_x13 = True
class TestX13(object):
@classmethod
def setupClass(cls):
if not _have_x13:
raise SkipTest('X13/X12 not available')
import pandas as pd
from statsmodels.datasets import macrodata, co2
dta = macrodata.load_pandas().data
dates = dates_from_range('1959Q1', '2009Q3')
index = pd.DatetimeIndex(dates)
dta.index = index
cls.quarterly_data = dta.dropna()
dta = co2.load_pandas().data
dta['co2'] = dta.co2.interpolate()
cls.monthly_data = dta.resample('M')
cls.monthly_start_data = dta.resample('MS')
def test_x13_arima_select_order(self):
res = x13_arima_select_order(self.monthly_data)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.monthly_start_data)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.monthly_data.co2)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.monthly_start_data.co2)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.quarterly_data[['realgdp']])
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
res = x13_arima_select_order(self.quarterly_data.realgdp)
assert_(isinstance(res.order, tuple))
assert_(isinstance(res.sorder, tuple))
| bsd-3-clause |
RPGOne/scikit-learn | examples/feature_stacker.py | 80 | 1911 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features where good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
rossant/galry | examples/filter.py | 1 | 3105 | """GPU-based image processing filters."""
import os
from galry import *
import pylab as plt
# we define a list of 3x3 image filter kernels
KERNELS = dict(
original=np.array([[0,0,0],[0,1,0],[0,0,0]]),
sharpen=np.array([[0,-1,0],[-1,5,-1],[0,-1,0]]),
sobel=np.array([[1,0,-1],[2,0,-1],[1,0,-1]]),
emboss=np.array([[-2,-1,0],[-1,1,1],[0,1,2]]),
blur=np.array([[1,2,1],[2,4,2],[1,2,1]]),
derivex=np.array([[0,0,0],[-1,0,1],[0,0,0]]),
edges=np.array([[0,1,0],[1,-4,1],[0,1,0]]),
)
# current kernel index
CURRENT_KERNEL_IDX = 0
# we specialize the texture visual (which displays a 2D image) to add
# GPU filtering capabilities
class FilterVisual(TextureVisual):
def initialize_fragment(self):
# elementary step in the texture, where the coordinates are normalized
# in [0, 1]. The step is then 1/size of the texture.
self.add_uniform("step", data=1. / self.texsize[0])
# uniform 3x3 matrix variable
self.add_uniform("kernel", vartype="float", ndim=(3,3),
data=KERNELS['original'])
# we add some code in the fragment shader which computes the filtered
# texture
self.add_fragment_main("""
/* Compute the convolution of the texture with the kernel */
// The output color is a vec4 variable called `out_color`.
out_color = vec4(0., 0., 0., 1.);
// We compute the convolution.
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
// The variables are those defined in the base class
// TextureVisual.
out_color.xyz += kernel[i][j] * texture2D(tex_sampler,
varying_tex_coords + step * vec2(j - 1, i - 1)).xyz;
}
}
""")
def change_kernel(figure, parameter):
# we update the kernel index
global CURRENT_KERNEL_IDX
CURRENT_KERNEL_IDX += parameter
CURRENT_KERNEL_IDX = np.mod(CURRENT_KERNEL_IDX, len(KERNELS))
# we get the kernel name and matrix
name = KERNELS.keys()[CURRENT_KERNEL_IDX]
kernel = np.array(KERNELS[name], dtype=np.float32)
# we normalize the kernel
if kernel.sum() != 0:
kernel = kernel / float(kernel.sum())
# now we change the kernel variable in the image
figure.set_data(kernel=kernel, visual='image')
# and the text in the legend
figure.set_data(text="%s filter" % name, visual='legend')
# create square figure
figure(constrain_ratio=True, constrain_navigation=True, figsize=(512,512))
# load the texture from an image thanks to matplotlib
path = os.path.dirname(os.path.realpath(__file__))
texture = plt.imread(os.path.join(path, "images/lena.png"))
# we add our custom visual
visual(FilterVisual, texture=texture, name='image')
# we add some text
text(text='original filter', name='legend', coordinates=(0,.95), is_static=True)
# add left and right arrow action handler
action('KeyPress', change_kernel, key='Left', param_getter=-1)
action('KeyPress', change_kernel, key='Right', param_getter=1)
show()
| bsd-3-clause |
assisi/workshops | binary_choice/two_arenas_real_real/casu_utils.py | 5 | 8116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
a library of functions used in CASU controller dynamics. Got a lot of
messy code that would be neater like this
RM, Feb 2015
'''
import numpy as np
from assisipy import casu
#import matplotlib.cm as cm
from datetime import datetime
import parsing
import time
### ============= maths ============= ###
#{{{ rolling_avg
def rolling_avg(x, n):
'''
given the sample x, provide a rolling average taking n samples per data point.
NOT a quick solution, but easy...
'''
y = np.zeros((len(x),))
for ctr in range(len(x)):
y[ctr] = np.sum(x[ctr:(ctr+n)])
return y/n
#}}}
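#{{{ rolling_avg_conv (sketch)
# A vectorised alternative (an untested sketch, not used elsewhere in this
# module): np.convolve with a uniform kernel gives a *centred* moving
# average, so the edge behaviour differs slightly from rolling_avg above,
# which averages forward from each index.
#
#   def rolling_avg_conv(x, n):
#       return np.convolve(x, np.ones(n) / float(n), mode='same')
#}}}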
### ============= general behaviour ============= ###
#{{{ measure_ir_sensors
def measure_ir_sensors(mycasu, detect_data):
''' count up sensors that detect a bee, plus rotate history array '''
# don't discriminate between specific directions, so just accumulate all
count = 0
for (val,t) in zip(mycasu.get_ir_raw_value(casu.ARRAY), mycasu.threshold):
if (val > t):
count += 1
#print "raw:",
#print ",".join(["{:.2f}".format(x) for x in mycasu.get_ir_raw_value(casu.ARRAY)])
#mycasu.total_count += count # historical count over all time
detect_data = np.roll(detect_data, 1) # step all positions back
detect_data[0] = count # and overwrite the first entry (this was rolled
# around, so is the oldest entry -- and to become the newest now)
# allow ext usage to apply window -- remain agnostic here during collection.
return detect_data, count
#}}}
#{{{ heater_one_step
def heater_one_step(h):
'''legacy function'''
return detect_bee_proximity_saturated(h)
def detect_bee_proximity_saturated(h):
# measure proximity
detect_data, count = measure_ir_sensors(h, h.detect_data)
h.detect_data = detect_data
# overall bee count for this casu
sat_count = min(h.sat_lim, count) # saturates
return sat_count
#}}}
#{{{ find_mean_ext_temp
def find_mean_ext_temp(h):
r = []
for sensor in [casu.TEMP_F, casu.TEMP_B, casu.TEMP_L, casu.TEMP_R ]:
r.append(h.get_temp(sensor))
if len(r):
mean = sum(r) / float(len(r))
else:
mean = 0.0
return mean
#}}}
### ============= inter-casu comms ============= ###
#{{{ comms functions
def transmit_my_count(h, sat_count, dest='accomplice'):
s = "{}".format(sat_count)
if h.verb > 1:
print "\t[i]==> {} send msg ({} by): '{}' bees, to {}".format(
h._thename, len(s), s, dest)
h.send_message(dest, s)
#TODO: this is non-specific, i.e., any message from anyone is assumed to have
# the right form. For heterogeneous neighbours, we need to check identity as
# well
def recv_all_msgs(h, retry_cnt=0, max_recv=None):
'''
continue to read message bffer until no more messages.
as list of parsed messages parsed into (src, float) pairs
'''
msgs = []
try_cnt = 0
while(True):
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
msgs.append((src, bee_cnt))
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, {4} from {0} {5}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename,
BLU, ENDC)
if h.verb > 1:
#print dir(msg)
print msg.items()
if(max_recv is not None and len(msgs) >= max_recv):
break
else:
# buffer emptied, return
try_cnt += 1
if try_cnt > retry_cnt:
break
return msgs
def recv_neighbour_msg(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = int(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
def recv_neighbour_msg_w_src(h):
''' provide the source of a message as well as the message count'''
bee_cnt = 0
src = None
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
if h.verb > 1:
#print dir(msg)
print msg.items()
return bee_cnt, src
def recv_neighbour_msg_flt(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = float(txt.split()[0])
if h.verb > 1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
#}}}
def find_comms_mapping(name, rtc_path, suffix='-sim', verb=True):
links = parsing.find_comm_link_mapping(
name, rtc_path=rtc_path, suffix=suffix, verb=verb)
if verb:
print "[I] for {}, found the following nodes/edges".format(name)
print "\t", links.items()
print "\n===================================\n\n"
return links
### ============= display ============= ###
#{{{ term codes for colored text
ERR = '\033[41m'
BLU = '\033[34m'
ENDC = '\033[0m'
#}}}
#{{{ color funcs
#def gen_cmap(m='hot', n=32) :
# return cm.get_cmap(m, n) # get LUT with 32 values -- some gradation but see steps
def gen_clr_tgt(new_temp, cmap, tgt=None, min_temp=28.0, max_temp=38.0):
t_rng = float(max_temp - min_temp)
fr = (new_temp - min_temp) / t_rng
i = int(fr * len(cmap))
# compute basic color, if on target
#r,g,b,a = cmap(i)
g = 0.0; b = 0.0; a = 1.0;
i = sorted([0, i, len(cmap)-1])[1]
r = cmap[i]
# now adjust according to distance from target
if tgt is None: tgt=new_temp
dt = np.abs(new_temp - tgt)
dt_r = dt / t_rng
h2 = np.array([r,g,b])
h2 *= (1-dt_r)
return h2
# a colormap with 8 settings, that doesn't depend on the presence of
# matplotlib (hard-coded though.) -- deprecating
_clrs = [
(0.2, 0.2, 0.2),
(0.041, 0, 0),
(0.412, 0, 0),
(0.793, 0, 0),
(1, 0.174, 0),
(1, 0.555, 0),
(1, 0.936, 0),
(1, 1, 0.475),
(1, 1, 1),
]
_dflt_clr = (0.2, 0.2, 0.2)
# can access other gradations of colour using M = cm.hot(n) for n steps, then
# either extract them once (`clrs = M(arange(n)`) or each time ( `clr_x = M(x)`)
# But here we're going to use 8 steps for all CASUs so no bother.
#}}}
def sep_with_nowtime():
print "# =================== t={} =================== #\n".format(
datetime.now().strftime("%H:%M:%S"))
### ============= more generic ============= ###
#{{{ a struct constructor
# some handy python utilities, from Kier Dugan
class Struct:
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
def get(self, key, default=None):
return self.__dict__.get(key, default)
def addFields(self, **kwargs):
# add other fields (basically variables) after initialisation
self.__dict__.update (kwargs)
#}}}
### calibration
def _calibrate(h, calib_steps, calib_gain=1.1, interval=0.1):
'''
read the sensors several times, and take the highest reading
seen as the threshold.
'''
h._raw_thresh = [0] * 7 # default cases for threshold
for stp in xrange(calib_steps):
for i, v in enumerate(h.get_ir_raw_value(casu.ARRAY)):
if v > h._raw_thresh[i]:
h._raw_thresh[i] = v
time.sleep(interval)
h.thresh = [x*calib_gain for x in h._raw_thresh]
h.threshold = [x*calib_gain for x in h._raw_thresh]
if h.verb:
_ts =", ".join(["{:.2f}".format(x) for x in h.thresh])
print "[I] post-calibration, we have thresh: ", _ts
| lgpl-3.0 |
anirudhjayaraman/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, DD, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
rdawis/SoundSpace | main.py | 1 | 3588 | """
Copyright (C) 2017 Raquel Dawis
This file is part of SoundSpace.
SoundSpace is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
from img_deconstruct import Image
from sound_reconstruct import SoundSpace
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt #*
import numpy as np
import Tkinter as tk #*
import tkFileDialog as fd #*
import os
import sys
import timeit #*
#* things with the asterisk are extras that only this main script needs:
# matplotlib is optional if you just want the sound, and the Tkinter imports
# are only used for the file-selection dialog. Imports without an asterisk
# are required (a minimal sketch of the core pipeline follows).
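# A minimal sketch of the core pipeline with none of the optional pieces
# (no matplotlib, no Tkinter). Not called below; the path argument is a
# placeholder.
def _minimal_pipeline(fits_path):
    img = Image(fits_path)                       # deconstruct the FITS image
    sound = SoundSpace(img.reduction, 1.0)       # sonify the reduction
    wav.write('%s.wav' % img.name, 44100, sound.stereo_sig)
    return sound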
# TO DO:
# Clean this shit up.
# [x] Image deconstruction
# [x] Data object from deconstruction
# [x] Sound reconstruction
# To revamp:
# Combine sound gen and sound hrtf patch to one object
# [x] Clean up main :)
# [x] Add progress text
# [ ] Add documentation to the libraries
# Image Select
if len(sys.argv) < 2:
root = tk.Tk()
root.withdraw()
FILEOPENOPTIONS = dict(defaultextension='.fits',
filetypes=[('FITS file','*.fits')])
print 'Selecting images...'
filename = fd.askopenfilename(**FILEOPENOPTIONS)
else:
filename = sys.argv[1]
print filename
try:
print 'Done. Opening %s' % filename
except TypeError:
sys.exit('No file found, quitting...')
print "Deconstructing Image..."
start = timeit.default_timer()
proc_start = start
a = Image(filename)
end = timeit.default_timer() - start
print "reduction make: %0.6f" % end
print "Done."
print "Creating fast map..."
start = timeit.default_timer()
fast = SoundSpace(a.reduction, 1.0)
end = timeit.default_timer() - start
print "fast map make: %0.6f" % end
print "Creating slow map..."
start = timeit.default_timer()
slow = SoundSpace(a.reduction, 3.0)
end = timeit.default_timer() - start
print "slow map make: %0.6f" % end
print "Done."
dire = 'objects/%s' % a.name
try:
os.makedirs(dire)
except OSError:
if not os.path.isdir(dire):
raise
print "Plotting your demis- I mean, downf- I mean, images :)"
fig, axes = plt.subplots(nrows=1, ncols=2)
axes[0].imshow(np.log10(a.data.T), origin='lower', cmap='gist_stern')
axes[1].imshow(a.reduction.T, origin='lower', cmap='gist_stern')
print 'Finalizing wav writes and png writes...'
start = timeit.default_timer()
wav.write(os.path.join(dire, 'fast_%s.wav' % a.name), 44100, fast.stereo_sig)
end = timeit.default_timer() - start
print "fast wave save: %0.6f" % end
start = timeit.default_timer()
wav.write(os.path.join(dire, 'slow_%s.wav' % a.name), 44100, slow.stereo_sig)
end = timeit.default_timer() - start
print "slow wave save: %0.6f" % end
start = timeit.default_timer()
fig.savefig(os.path.join(dire, '%s.svg' % a.name), format='svg', dpi=1200)
end = timeit.default_timer() - start
print "figure save: %0.6f" % end
print 'Done. :)'
proc_end = timeit.default_timer() - proc_start
print 'process time: %06f' % proc_end
plt.show()
| gpl-3.0 |
humdings/zipline | tests/test_finance.py | 5 | 17083 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.finance package
"""
from datetime import datetime, timedelta
import os
from nose.tools import timed
import numpy as np
import pandas as pd
import pytz
from six import iteritems
from six.moves import range
from testfixtures import TempDirectory
from zipline.assets.synthetic import make_simple_equity_info
from zipline.finance.blotter import Blotter
from zipline.finance.execution import MarketOrder, LimitOrder
from zipline.finance.performance import PerformanceTracker
from zipline.finance.trading import SimulationParameters
from zipline.data.us_equity_pricing import BcolzDailyBarReader
from zipline.data.minute_bars import BcolzMinuteBarReader
from zipline.data.data_portal import DataPortal
from zipline.data.us_equity_pricing import BcolzDailyBarWriter
from zipline.finance.slippage import FixedSlippage
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.protocol import BarData
from zipline.testing import (
tmp_trading_env,
write_bcolz_minute_data,
)
from zipline.testing.fixtures import (
WithLogger,
WithTradingEnvironment,
ZiplineTestCase,
)
import zipline.utils.factory as factory
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
_multiprocess_can_split_ = False
class FinanceTestCase(WithLogger,
WithTradingEnvironment,
ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = 1, 2, 133
start = START_DATE = pd.Timestamp('2006-01-01', tz='utc')
end = END_DATE = pd.Timestamp('2006-12-31', tz='utc')
def init_instance_fixtures(self):
super(FinanceTestCase, self).init_instance_fixtures()
self.zipline_test_config = {'sid': 133}
# TODO: write tests for short sales
# TODO: write a test to do massive buying or shorting.
@timed(DEFAULT_TIMEOUT)
def test_partially_filled_orders(self):
# create a scenario where order size and trade size are equal
# so that orders must be spread out over several trades.
params = {
'trade_count': 360,
'trade_interval': timedelta(minutes=1),
'order_count': 2,
'order_amount': 100,
'order_interval': timedelta(minutes=1),
# because we placed two orders for 100 shares each, and the volume
            # of each trade is 100, and by default you can take up to 2.5% of the
# bar's volume, the simulator should spread the order into 100
# trades of 2 shares per order.
'expected_txn_count': 100,
'expected_txn_volume': 2 * 100,
'default_slippage': True
}
self.transaction_sim(**params)
# same scenario, but with short sales
params2 = {
'trade_count': 360,
'trade_interval': timedelta(minutes=1),
'order_count': 2,
'order_amount': -100,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 100,
'expected_txn_volume': 2 * -100,
'default_slippage': True
}
self.transaction_sim(**params2)
@timed(DEFAULT_TIMEOUT)
def test_collapsing_orders(self):
# create a scenario where order.amount <<< trade.volume
# to test that several orders can be covered properly by one trade,
# but are represented by multiple transactions.
params1 = {
'trade_count': 6,
'trade_interval': timedelta(hours=1),
'order_count': 24,
'order_amount': 1,
'order_interval': timedelta(minutes=1),
            # because we placed orders totaling less than 25% of one trade,
            # the simulator should fill each order against a single trade,
            # producing one transaction per order.
'expected_txn_count': 24,
'expected_txn_volume': 24
}
self.transaction_sim(**params1)
# second verse, same as the first. except short!
params2 = {
'trade_count': 6,
'trade_interval': timedelta(hours=1),
'order_count': 24,
'order_amount': -1,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 24,
'expected_txn_volume': -24
}
self.transaction_sim(**params2)
# Runs the collapsed trades over daily trade intervals.
# Ensuring that our delay works for daily intervals as well.
params3 = {
'trade_count': 6,
'trade_interval': timedelta(days=1),
'order_count': 24,
'order_amount': 1,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 24,
'expected_txn_volume': 24
}
self.transaction_sim(**params3)
@timed(DEFAULT_TIMEOUT)
def test_alternating_long_short(self):
# create a scenario where we alternate buys and sells
params1 = {
'trade_count': int(6.5 * 60 * 4),
'trade_interval': timedelta(minutes=1),
'order_count': 4,
'order_amount': 10,
'order_interval': timedelta(hours=24),
'alternate': True,
'complete_fill': True,
'expected_txn_count': 4,
'expected_txn_volume': 0 # equal buys and sells
}
self.transaction_sim(**params1)
def transaction_sim(self, **params):
"""This is a utility method that asserts expected
results for conversion of orders to transactions given a
trade history
"""
trade_count = params['trade_count']
trade_interval = params['trade_interval']
order_count = params['order_count']
order_amount = params['order_amount']
order_interval = params['order_interval']
expected_txn_count = params['expected_txn_count']
expected_txn_volume = params['expected_txn_volume']
# optional parameters
# ---------------------
# if present, alternate between long and short sales
alternate = params.get('alternate')
# if present, expect transaction amounts to match orders exactly.
complete_fill = params.get('complete_fill')
asset1 = self.asset_finder.retrieve_asset(1)
metadata = make_simple_equity_info([asset1.sid], self.start, self.end)
with TempDirectory() as tempdir, \
tmp_trading_env(equities=metadata,
load=self.make_load_function()) as env:
if trade_interval < timedelta(days=1):
sim_params = factory.create_simulation_parameters(
start=self.start,
end=self.end,
data_frequency="minute"
)
minutes = self.trading_calendar.minutes_window(
sim_params.first_open,
int((trade_interval.total_seconds() / 60) * trade_count)
+ 100)
price_data = np.array([10.1] * len(minutes))
assets = {
asset1.sid: pd.DataFrame({
"open": price_data,
"high": price_data,
"low": price_data,
"close": price_data,
"volume": np.array([100] * len(minutes)),
"dt": minutes
}).set_index("dt")
}
write_bcolz_minute_data(
self.trading_calendar,
self.trading_calendar.sessions_in_range(
self.trading_calendar.minute_to_session_label(
minutes[0]
),
self.trading_calendar.minute_to_session_label(
minutes[-1]
)
),
tempdir.path,
iteritems(assets),
)
equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
data_portal = DataPortal(
env.asset_finder, self.trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
)
else:
sim_params = factory.create_simulation_parameters(
data_frequency="daily"
)
days = sim_params.sessions
assets = {
1: pd.DataFrame({
"open": [10.1] * len(days),
"high": [10.1] * len(days),
"low": [10.1] * len(days),
"close": [10.1] * len(days),
"volume": [100] * len(days),
"day": [day.value for day in days]
}, index=days)
}
path = os.path.join(tempdir.path, "testdata.bcolz")
BcolzDailyBarWriter(path, self.trading_calendar, days[0],
days[-1]).write(
assets.items()
)
equity_daily_reader = BcolzDailyBarReader(path)
data_portal = DataPortal(
env.asset_finder, self.trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
)
if "default_slippage" not in params or \
not params["default_slippage"]:
slippage_func = FixedSlippage()
else:
slippage_func = None
blotter = Blotter(sim_params.data_frequency, slippage_func)
start_date = sim_params.first_open
if alternate:
alternator = -1
else:
alternator = 1
tracker = PerformanceTracker(sim_params, self.trading_calendar,
self.env)
# replicate what tradesim does by going through every minute or day
# of the simulation and processing open orders each time
if sim_params.data_frequency == "minute":
ticks = minutes
else:
ticks = days
transactions = []
order_list = []
order_date = start_date
for tick in ticks:
blotter.current_dt = tick
if tick >= order_date and len(order_list) < order_count:
# place an order
direction = alternator ** len(order_list)
order_id = blotter.order(
asset1,
order_amount * direction,
MarketOrder())
order_list.append(blotter.orders[order_id])
order_date = order_date + order_interval
                    # move after-hours orders to just after the next
                    # market open.
if order_date.hour >= 21:
if order_date.minute >= 00:
order_date = order_date + timedelta(days=1)
order_date = order_date.replace(hour=14, minute=30)
else:
bar_data = BarData(
data_portal=data_portal,
simulation_dt_func=lambda: tick,
data_frequency=sim_params.data_frequency,
trading_calendar=self.trading_calendar,
restrictions=NoRestrictions(),
)
txns, _, closed_orders = blotter.get_transactions(bar_data)
for txn in txns:
tracker.process_transaction(txn)
transactions.append(txn)
blotter.prune_orders(closed_orders)
for i in range(order_count):
order = order_list[i]
self.assertEqual(order.asset, asset1)
self.assertEqual(order.amount, order_amount * alternator ** i)
if complete_fill:
self.assertEqual(len(transactions), len(order_list))
total_volume = 0
for i in range(len(transactions)):
txn = transactions[i]
total_volume += txn.amount
if complete_fill:
order = order_list[i]
self.assertEqual(order.amount, txn.amount)
self.assertEqual(total_volume, expected_txn_volume)
self.assertEqual(len(transactions), expected_txn_count)
cumulative_pos = tracker.position_tracker.positions[asset1]
if total_volume == 0:
self.assertIsNone(cumulative_pos)
else:
self.assertEqual(total_volume, cumulative_pos.amount)
# the open orders should not contain the asset.
oo = blotter.open_orders
self.assertNotIn(
asset1,
oo,
"Entry is removed when no open orders"
)
def test_blotter_processes_splits(self):
blotter = Blotter('daily', equity_slippage=FixedSlippage())
# set up two open limit orders with very low limit prices,
# one for sid 1 and one for sid 2
asset1 = self.asset_finder.retrieve_asset(1)
asset2 = self.asset_finder.retrieve_asset(2)
asset133 = self.asset_finder.retrieve_asset(133)
blotter.order(asset1, 100, LimitOrder(10))
blotter.order(asset2, 100, LimitOrder(10))
# send in splits for assets 133 and 2. We have no open orders for
# asset 133 so it should be ignored.
blotter.process_splits([(asset133, 0.5), (asset2, 0.3333)])
for asset in [asset1, asset2]:
order_lists = blotter.open_orders[asset]
self.assertIsNotNone(order_lists)
self.assertEqual(1, len(order_lists))
asset1_order = blotter.open_orders[1][0]
asset2_order = blotter.open_orders[2][0]
# make sure the asset1 order didn't change
self.assertEqual(100, asset1_order.amount)
self.assertEqual(10, asset1_order.limit)
self.assertEqual(1, asset1_order.asset)
# make sure the asset2 order did change
# to 300 shares at 3.33
self.assertEqual(300, asset2_order.amount)
self.assertEqual(3.33, asset2_order.limit)
self.assertEqual(2, asset2_order.asset)
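# Illustrative sketch (not part of the test suite) of the split arithmetic the
# test above asserts: a ratio of 0.3333 turns 100 shares limited at 10 into
# 300 shares limited at 3.33.
def _split_adjust_sketch(amount, limit, ratio):
    return int(round(amount / ratio)), round(limit * ratio, 2)
# _split_adjust_sketch(100, 10, 0.3333) -> (300, 3.33)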
class TradingEnvironmentTestCase(WithLogger,
WithTradingEnvironment,
ZiplineTestCase):
"""
Tests for date management utilities in zipline.finance.trading.
"""
def test_simulation_parameters(self):
sp = SimulationParameters(
start_session=pd.Timestamp("2008-01-01", tz='UTC'),
end_session=pd.Timestamp("2008-12-31", tz='UTC'),
capital_base=100000,
trading_calendar=self.trading_calendar,
)
self.assertTrue(sp.last_close.month == 12)
self.assertTrue(sp.last_close.day == 31)
@timed(DEFAULT_TIMEOUT)
def test_sim_params_days_in_period(self):
# January 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
params = SimulationParameters(
start_session=pd.Timestamp("2007-12-31", tz='UTC'),
end_session=pd.Timestamp("2008-01-07", tz='UTC'),
capital_base=100000,
trading_calendar=self.trading_calendar,
)
expected_trading_days = (
datetime(2007, 12, 31, tzinfo=pytz.utc),
# Skip new years
# holidays taken from: http://www.nyse.com/press/1191407641943.html
datetime(2008, 1, 2, tzinfo=pytz.utc),
datetime(2008, 1, 3, tzinfo=pytz.utc),
datetime(2008, 1, 4, tzinfo=pytz.utc),
# Skip Saturday
# Skip Sunday
datetime(2008, 1, 7, tzinfo=pytz.utc)
)
num_expected_trading_days = 5
self.assertEquals(
num_expected_trading_days,
len(params.sessions)
)
np.testing.assert_array_equal(expected_trading_days,
params.sessions.tolist())
| apache-2.0 |
gamahead/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py | 69 | 27260 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.collections as collections
import matplotlib.contour as contour
make_axes_kw_doc = '''
========== ====================================================
Property Description
========== ====================================================
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
========== ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g. '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
''' % (make_axes_kw_doc, colormap_kw_doc)
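def _example_usage():
    # Illustrative sketch only (not used by this module): the typical call
    # pattern described in colorbar_doc above. The data array is a
    # placeholder.
    import matplotlib.pyplot as plt
    data = np.random.rand(10, 10)
    im = plt.imshow(data)
    cb = plt.colorbar(im, orientation='vertical', shrink=0.9)
    cb.set_label('counts')
    return cb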
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
_slice_dict = {'neither': slice(0,1000000),
'both': slice(1,-1),
'min': slice(1,1000000),
'max': slice(0,-1)}
def __init__(self, ax, cmap=None,
norm=None,
alpha=1.0,
values=None,
boundaries=None,
orientation='vertical',
extend='neither',
spacing='uniform', # uniform or proportional
ticks=None,
format=None,
drawedges=False,
filled=True,
):
self.ax = ax
if cmap is None: cmap = cm.get_cmap()
if norm is None: norm = colors.Normalize()
self.alpha = alpha
cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
self.values = values
self.boundaries = boundaries
self.extend = extend
self._inside = self._slice_dict[extend]
self.spacing = spacing
self.orientation = orientation
self.drawedges = drawedges
self.filled = filled
self.solids = None
self.lines = None
self.dividers = None
self.set_label('')
if cbook.iterable(ticks):
self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
else:
self.locator = ticks # Handle default in _ticker()
if format is None:
if isinstance(self.norm, colors.LogNorm):
self.formatter = ticker.LogFormatter()
else:
self.formatter = ticker.ScalarFormatter()
elif cbook.is_string_like(format):
self.formatter = ticker.FormatStrFormatter(format)
else:
self.formatter = format # Assume it is a Formatter
# The rest is in a method so we can recalculate when clim changes.
self.draw_all()
def draw_all(self):
'''
Calculate any free parameters based on the current cmap and norm,
and do all the drawing.
'''
self._process_values()
self._find_range()
X, Y = self._mesh()
C = self._values[:,np.newaxis]
self._config_axes(X, Y)
if self.filled:
self._add_solids(X, Y, C)
self._set_label()
def _config_axes(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = patches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_xticks([])
ax.yaxis.set_label_position('right')
ax.yaxis.set_ticks_position('right')
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_yticks([])
ax.xaxis.set_label_position('bottom')
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
def set_label(self, label, **kw):
'''
Label the long axis of the colorbar
'''
self._label = label
self._labelkw = kw
self._set_label()
def _outline(self, X, Y):
'''
Return *x*, *y* arrays of colorbar bounding polygon,
taking orientation into account.
'''
N = X.shape[0]
ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0]
x = np.take(np.ravel(np.transpose(X)), ii)
y = np.take(np.ravel(np.transpose(Y)), ii)
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if self.orientation == 'horizontal':
return np.hstack((y, x))
return np.hstack((x, y))
def _edges(self, X, Y):
'''
Return the separator line segments; helper for _add_solids.
'''
N = X.shape[0]
# Using the non-array form of these line segments is much
# simpler than making them into arrays.
if self.orientation == 'vertical':
return [zip(X[i], Y[i]) for i in range(1, N-1)]
else:
return [zip(Y[i], X[i]) for i in range(1, N-1)]
def _add_solids(self, X, Y, C):
'''
Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
optionally add separators.
'''
## Change to pcolorfast after fixing bugs in some backends...
if self.orientation == 'vertical':
args = (X, Y, C)
else:
args = (np.transpose(Y), np.transpose(X), np.transpose(C))
kw = {'cmap':self.cmap, 'norm':self.norm,
'shading':'flat', 'alpha':self.alpha}
# Save, set, and restore hold state to keep pcolor from
# clearing the axes. Ordinarily this will not be needed,
# since the axes object should already have hold set.
_hold = self.ax.ishold()
self.ax.hold(True)
col = self.ax.pcolor(*args, **kw)
self.ax.hold(_hold)
#self.add_observer(col) # We should observe, not be observed...
self.solids = col
if self.drawedges:
self.dividers = collections.LineCollection(self._edges(X,Y),
colors=(mpl.rcParams['axes.edgecolor'],),
linewidths=(0.5*mpl.rcParams['axes.linewidth'],)
)
self.ax.add_collection(self.dividers)
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar.
'''
N = len(levels)
dummy, y = self._locate(levels)
        if len(y) != N:
raise ValueError("levels are outside colorbar range")
x = np.array([0.0, 1.0])
X, Y = np.meshgrid(x,y)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
def _ticker(self):
'''
Return two sequences: ticks (colorbar data locations)
and ticklabels (strings).
'''
locator = self.locator
formatter = self.formatter
if locator is None:
if self.boundaries is None:
if isinstance(self.norm, colors.NoNorm):
nv = len(self._values)
base = 1 + int(nv/10)
locator = ticker.IndexLocator(base=base, offset=0)
elif isinstance(self.norm, colors.BoundaryNorm):
b = self.norm.boundaries
locator = ticker.FixedLocator(b, nbins=10)
elif isinstance(self.norm, colors.LogNorm):
locator = ticker.LogLocator()
else:
locator = ticker.MaxNLocator()
else:
b = self._boundaries[self._inside]
locator = ticker.FixedLocator(b, nbins=10)
if isinstance(self.norm, colors.NoNorm):
intv = self._values[0], self._values[-1]
else:
intv = self.vmin, self.vmax
locator.create_dummy_axis()
formatter.create_dummy_axis()
locator.set_view_interval(*intv)
locator.set_data_interval(*intv)
formatter.set_view_interval(*intv)
formatter.set_data_interval(*intv)
b = np.array(locator())
b, ticks = self._locate(b)
formatter.set_locs(b)
ticklabels = [formatter(t, i) for i, t in enumerate(b)]
offset_string = formatter.get_offset()
return ticks, ticklabels, offset_string
def _process_values(self, b=None):
'''
Set the :attr:`_boundaries` and :attr:`_values` attributes
based on the input boundaries and values. Input boundaries
can be *self.boundaries* or the argument *b*.
'''
if b is None:
b = self.boundaries
if b is not None:
self._boundaries = np.asarray(b, dtype=float)
if self.values is None:
self._values = 0.5*(self._boundaries[:-1]
+ self._boundaries[1:])
if isinstance(self.norm, colors.NoNorm):
self._values = (self._values + 0.00001).astype(np.int16)
return
self._values = np.array(self.values)
return
if self.values is not None:
self._values = np.array(self.values)
if self.boundaries is None:
b = np.zeros(len(self.values)+1, 'd')
                b[1:-1] = 0.5*(self._values[:-1] + self._values[1:])
b[0] = 2.0*b[1] - b[2]
b[-1] = 2.0*b[-2] - b[-3]
self._boundaries = b
return
self._boundaries = np.array(self.boundaries)
return
# Neither boundaries nor values are specified;
# make reasonable ones based on cmap and norm.
if isinstance(self.norm, colors.NoNorm):
b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
v = np.zeros((len(b)-1,), dtype=np.int16)
v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
if self.extend in ('both', 'min'):
v[0] = -1
if self.extend in ('both', 'max'):
v[-1] = self.cmap.N
self._boundaries = b
self._values = v
return
elif isinstance(self.norm, colors.BoundaryNorm):
b = list(self.norm.boundaries)
if self.extend in ('both', 'min'):
b = [b[0]-1] + b
if self.extend in ('both', 'max'):
b = b + [b[-1] + 1]
b = np.array(b)
v = np.zeros((len(b)-1,), dtype=float)
bi = self.norm.boundaries
v[self._inside] = 0.5*(bi[:-1] + bi[1:])
if self.extend in ('both', 'min'):
v[0] = b[0] - 1
if self.extend in ('both', 'max'):
v[-1] = b[-1] + 1
self._boundaries = b
self._values = v
return
else:
if not self.norm.scaled():
self.norm.vmin = 0
self.norm.vmax = 1
b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
if self.extend in ('both', 'min'):
b[0] = b[0] - 1
if self.extend in ('both', 'max'):
b[-1] = b[-1] + 1
self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
def _uniform_y(self, N):
'''
Return colorbar data coordinates for *N* uniformly
spaced boundaries, plus ends if required.
'''
if self.extend == 'neither':
y = np.linspace(0, 1, N)
else:
if self.extend == 'both':
y = np.zeros(N + 2, 'd')
y[0] = -0.05
y[-1] = 1.05
elif self.extend == 'min':
y = np.zeros(N + 1, 'd')
y[0] = -0.05
else:
y = np.zeros(N + 1, 'd')
y[-1] = 1.05
y[self._inside] = np.linspace(0, 1, N)
return y
def _proportional_y(self):
'''
Return colorbar data coordinates for the boundaries of
a proportional colorbar.
'''
if isinstance(self.norm, colors.BoundaryNorm):
b = self._boundaries[self._inside]
y = (self._boundaries - self._boundaries[0])
y = y / (self._boundaries[-1] - self._boundaries[0])
else:
y = self.norm(self._boundaries.copy())
if self.extend in ('both', 'min'):
y[0] = -0.05
if self.extend in ('both', 'max'):
y[-1] = 1.05
yi = y[self._inside]
norm = colors.Normalize(yi[0], yi[-1])
y[self._inside] = norm(yi)
return y
def _mesh(self):
'''
Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and
transposition for a horizontal colorbar are done outside
this function.
'''
x = np.array([0.0, 1.0])
if self.spacing == 'uniform':
y = self._uniform_y(self._central_N())
else:
y = self._proportional_y()
self._y = y
X, Y = np.meshgrid(x,y)
if self.extend in ('min', 'both'):
X[0,:] = 0.5
if self.extend in ('max', 'both'):
X[-1,:] = 0.5
return X, Y
def _locate(self, x):
'''
Given a possible set of color data values, return the ones
within range, together with their corresponding colorbar
data coordinates.
'''
if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
b = self._boundaries
xn = x
xout = x
else:
# Do calculations using normalized coordinates so
# as to make the interpolation more accurate.
b = self.norm(self._boundaries, clip=False).filled()
# We do our own clipping so that we can allow a tiny
# bit of slop in the end point ticks to allow for
# floating point errors.
xn = self.norm(x, clip=False).filled()
in_cond = (xn > -0.001) & (xn < 1.001)
xn = np.compress(in_cond, xn)
xout = np.compress(in_cond, x)
# The rest is linear interpolation with clipping.
y = self._y
N = len(b)
ii = np.minimum(np.searchsorted(b, xn), N-1)
i0 = np.maximum(ii - 1, 0)
#db = b[ii] - b[i0]
db = np.take(b, ii) - np.take(b, i0)
db = np.where(i0==ii, 1.0, db)
#dy = y[ii] - y[i0]
dy = np.take(y, ii) - np.take(y, i0)
z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db
return xout, z
def set_alpha(self, alpha):
self.alpha = alpha
class Colorbar(ColorbarBase):
def __init__(self, ax, mappable, **kw):
mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
# are set when colorbar is called,
# even if mappable.draw has not yet
# been called. This will not change
# vmin, vmax if they are already set.
self.mappable = mappable
kw['cmap'] = mappable.cmap
kw['norm'] = mappable.norm
kw['alpha'] = mappable.get_alpha()
if isinstance(mappable, contour.ContourSet):
CS = mappable
kw['boundaries'] = CS._levels
kw['values'] = CS.cvalues
kw['extend'] = CS.extend
#kw['ticks'] = CS._levels
kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
kw['filled'] = CS.filled
ColorbarBase.__init__(self, ax, **kw)
if not CS.filled:
self.add_lines(CS)
else:
ColorbarBase.__init__(self, ax, **kw)
def add_lines(self, CS):
'''
Add the lines from a non-filled
:class:`~matplotlib.contour.ContourSet` to the colorbar.
'''
if not isinstance(CS, contour.ContourSet) or CS.filled:
raise ValueError('add_lines is only for a ContourSet of lines')
tcolors = [c[0] for c in CS.tcolors]
tlinewidths = [t[0] for t in CS.tlinewidths]
# The following was an attempt to get the colorbar lines
# to follow subsequent changes in the contour lines,
# but more work is needed: specifically, a careful
# look at event sequences, and at how
# to make one object track another automatically.
#tcolors = [col.get_colors()[0] for col in CS.collections]
#tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
#print 'tlinewidths:', tlinewidths
ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)
def update_bruteforce(self, mappable):
'''
Manually change any contour line colors. This is called
when the image or contour plot to which this colorbar belongs
is changed.
'''
# We are using an ugly brute-force method: clearing and
# redrawing the whole thing. The problem is that if any
# properties have been changed by methods other than the
# colorbar methods, those changes will be lost.
self.ax.cla()
self.draw_all()
#if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax:
# self.ax.cla()
# self.draw_all()
if isinstance(self.mappable, contour.ContourSet):
CS = self.mappable
if not CS.filled:
self.add_lines(CS)
#if self.lines is not None:
# tcolors = [c[0] for c in CS.tcolors]
# self.lines.set_color(tcolors)
#Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
#Fixme: Some refactoring may be needed; we should not
# be recalculating everything if there was a simple alpha
# change.
def make_axes(parent, **kw):
orientation = kw.setdefault('orientation', 'vertical')
fraction = kw.pop('fraction', 0.15)
shrink = kw.pop('shrink', 1.0)
aspect = kw.pop('aspect', 20)
#pb = transforms.PBox(parent.get_position())
pb = parent.get_position(original=True).frozen()
if orientation == 'vertical':
pad = kw.pop('pad', 0.05)
x1 = 1.0-fraction
pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
anchor = (0.0, 0.5)
panchor = (1.0, 0.5)
else:
pad = kw.pop('pad', 0.15)
pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
aspect = 1.0/aspect
anchor = (0.5, 1.0)
panchor = (0.5, 0.0)
parent.set_position(pb1)
parent.set_anchor(panchor)
fig = parent.get_figure()
cax = fig.add_axes(pbcb)
cax.set_aspect(aspect, anchor=anchor, adjustable='box')
return cax, kw
make_axes.__doc__ ='''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
''' % make_axes_kw_doc
| gpl-3.0 |
selective-inference/selective-inference | doc/learning_examples/stability/stability_selection_harder_big.py | 2 | 3284 | import functools, uuid
import numpy as np, pandas as pd
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import full_model_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit
from sklearn.linear_model import lasso_path
def simulate(n=2000, p=1000, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.1,
sigma=sigma,
signal=signal,
random_signs=True,
scale=True)[:3]
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(XTX, XTXi, sampler):
min_success = 6
ntries = 10
def _alpha_grid(X, y, center, XTX):
n, p = X.shape
alphas, coefs, _ = lasso_path(X, y, Xy=center, precompute=XTX)
nselected = np.count_nonzero(coefs, axis=0)
return alphas[nselected < np.sqrt(0.8 * p)]
alpha_grid = _alpha_grid(X, y, sampler(scale=0.), XTX)
success = np.zeros((p, alpha_grid.shape[0]))
for _ in range(ntries):
scale = 1. # corresponds to sub-samples of 50%
noisy_S = sampler(scale=scale)
_, coefs, _ = lasso_path(X, y, Xy = noisy_S, precompute=XTX, alphas=alpha_grid)
success += np.abs(np.sign(coefs))
selected = np.apply_along_axis(lambda row: any(x>min_success for x in row), 1, success)
vars = set(np.nonzero(selected)[0])
return vars
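    # (Selection rule used above, for reference: a variable is kept if, at
    # some alpha on the shared grid, it was active in more than min_success
    # of the ntries noisy draws of X'y -- a stability-selection style
    # criterion.)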
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi)
# run selection algorithm
return full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(1, 1),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
for i in range(500):
df = simulate(B=3000)
csvfile = 'stability_selection_harder_big.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
| bsd-3-clause |
benoitsteiner/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 20 | 29376 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.contrib.learn.python.learn.dataframe.transforms import split_mask
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
after producing `n` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.local_variables_initializer())
if variables.global_variables():
session.run(variables.global_variables_initializer())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
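  # Sketch of a typical call (the column name is a placeholder): iterate over
  # two batches of an already-constructed DataFrame `df`.
  #
  #   for batch in df.run(num_batches=2):
  #     print(batch["price"])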
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
left_rows = self.select_rows(left_mask)
right_rows = self.select_rows(right_mask)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
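  # Sketch of a typical call (names are placeholders): deterministically carve
  # off ~80% of the rows keyed on a string id column and rebatch both sides.
  #
  #   train_df, eval_df = df.split("example_id", 0.8, batch_size=64)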
def split_fast(self, index_series, proportion, batch_size,
base_batch_size=1000):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
base_batch_size: the batch size to use for materialized data, prior to the
split.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
if isinstance(index_series, str):
index_series = self[index_series]
left_mask, = split_mask.SplitMask(proportion)(index_series)
right_mask = ~left_mask
self["left_mask__"] = left_mask
self["right_mask__"] = right_mask
# TODO(soergel): instead of base_batch_size can we just do one big batch?
# avoid computing the hashes twice
m = self.materialize_to_memory(batch_size=base_batch_size)
left_rows_df = m.select_rows(m["left_mask__"])
right_rows_df = m.select_rows(m["right_mask__"])
del left_rows_df[["left_mask__", "right_mask__"]]
del right_rows_df[["left_mask__", "right_mask__"]]
# avoid recomputing the split repeatedly
left_rows_df = left_rows_df.materialize_to_memory(batch_size=batch_size)
right_rows_df = right_rows_df.materialize_to_memory(batch_size=batch_size)
return left_rows_df, right_rows_df
def run_one_batch(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def run_one_epoch(self):
"""Creates a new 'Graph` and `Session` and runs a single epoch.
Naturally this makes sense only for DataFrames that fit in memory.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
epoch of the `DataFrame`.
"""
# batches is a list of dicts of numpy arrays
batches = [b for b in self.run(num_epochs=1)]
# first invert that to make a dict of lists of numpy arrays
pivoted_batches = {}
for k in batches[0].keys():
pivoted_batches[k] = []
for b in batches:
for k, v in b.items():
pivoted_batches[k].append(v)
# then concat the arrays in each column
result = {k: np.concatenate(column_batches)
for k, column_batches in pivoted_batches.items()}
return result
def materialize_to_memory(self, batch_size):
unordered_dict_of_arrays = self.run_one_epoch()
    # there may already be an 'index' column, in which case from_ordereddict()
# below will complain because it wants to generate a new one.
# for now, just remove it.
# TODO(soergel): preserve index history, potentially many levels deep
del unordered_dict_of_arrays["index"]
# the order of the columns in this dict is arbitrary; we just need it to
# remain consistent.
ordered_dict_of_arrays = collections.OrderedDict(unordered_dict_of_arrays)
return TensorFlowDataFrame.from_ordereddict(ordered_dict_of_arrays,
batch_size=batch_size)
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
      shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
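  # Sketch of a typical call: rebatch to 128 shuffled rows.
  #
  #   df128 = df.batch(128, shuffle=True, min_after_dequeue=1024)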
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
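  # Illustrative sketch (added for clarity; not part of the original module, and
  # the file pattern and default values below are hypothetical). For CSVs with a
  # header row containing one float column and one string column, usage would
  # look roughly like:
  #   df = TensorFlowDataFrame.from_csv(["/tmp/data-*.csv"],
  #                                     default_values=[0.0, ""],
  #                                     batch_size=64,
  #                                     shuffle=True)
  # which yields a `DataFrame` whose columns are named after the CSV header,
  # plus the reserved 'index' column.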
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
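  # Hedged example (added for illustration; assumes the enclosing class is
  # `TensorFlowDataFrame`, as referenced in `materialize_to_memory` above):
  #   import pandas as pd
  #   pdf = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
  #   df = TensorFlowDataFrame.from_pandas(pdf, batch_size=2, shuffle=False)
  # The result should expose the pandas columns plus 'index', re-batched to
  # batches of 2 rows.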
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
@classmethod
def from_ordereddict(cls,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from an `OrderedDict` of `numpy.ndarray`.
The returned `DataFrame` contains a column for each key of the dict plus an
extra 'index' column. The 'index' column contains the row number. Each of
the other columns contains a row from the corresponding array.
Args:
ordered_dict_of_arrays: `OrderedDict` of `numpy.ndarray` that serves as a
data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given arrays.
Raises:
ValueError: `ordered_dict_of_arrays` contains the reserved name 'index'.
"""
numpy_source = in_memory_source.OrderedDictNumpySource(
ordered_dict_of_arrays,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
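# --- Hedged usage sketch appended for illustration; not part of the original
# module. It only exercises constructors defined above and assumes the
# enclosing class is `TensorFlowDataFrame`, as referenced in
# `materialize_to_memory`.
if __name__ == "__main__":
  example_array = np.arange(20).reshape(10, 2)
  # Build a queue-backed DataFrame with batches of 5 rows, no shuffling.
  example_df = TensorFlowDataFrame.from_numpy(example_array,
                                              batch_size=5,
                                              shuffle=False)
  # Re-batch to pairs of rows; per the docstring above, the columns are
  # 'index' and 'value'.
  rebatched_df = example_df.batch(batch_size=2, shuffle=False)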
| apache-2.0 |
SmokinCaterpillar/pypet | pypet/tests/integration/envSCOOPdebug.py | 2 | 3454 | __author__ = 'robert'
import os
from pypet import LazyStorageService
from pypet.tests.integration.environment_scoop_test import EnvironmentTest, pypetconstants, \
check_nice, unittest
from pypet.tests.integration.environment_test import make_temp_dir, make_trajectory_name, \
random, Environment, get_log_config, Parameter, create_param_dict, add_params
import pypet.tests.testutils.ioutils as tu
tu.testParams['log_config'] = 'debug'
tu.prepare_log_config()
@unittest.skip
class MultiprocSCOOPNetlockTest(EnvironmentTest):
tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netlock', 'scoop'
def compare_trajectories(self,traj1,traj2):
return True
def setUp(self):
self.set_mode()
self.logfolder = make_temp_dir(os.path.join('experiments',
'tests',
'Log'))
random.seed()
self.trajname = make_trajectory_name(self)
self.filename = make_temp_dir(os.path.join('experiments',
'tests',
'HDF5',
'test%s.hdf5' % self.trajname))
env = Environment(trajectory=self.trajname,
storage_service=LazyStorageService,
filename=self.filename,
file_title=self.trajname,
log_stdout=self.log_stdout,
log_config=get_log_config(),
results_per_run=5,
wildcard_functions=self.wildcard_functions,
derived_parameters_per_run=5,
multiproc=self.multiproc,
ncores=self.ncores,
wrap_mode=self.mode,
use_pool=self.use_pool,
gc_interval=self.gc_interval,
freeze_input=self.freeze_input,
fletcher32=self.fletcher32,
complevel=self.complevel,
complib=self.complib,
shuffle=self.shuffle,
pandas_append=self.pandas_append,
pandas_format=self.pandas_format,
encoding=self.encoding,
niceness=self.niceness,
use_scoop=self.use_scoop,
port=self.url)
traj = env.v_trajectory
traj.v_standard_parameter=Parameter
## Create some parameters
self.param_dict={}
create_param_dict(self.param_dict)
### Add some parameter:
add_params(traj,self.param_dict)
#remember the trajectory and the environment
self.traj = traj
self.env = env
def set_mode(self):
super(MultiprocSCOOPNetlockTest, self).set_mode()
self.mode = pypetconstants.WRAP_MODE_NETLOCK
self.multiproc = True
self.freeze_input = False
self.ncores = 4
self.gc_interval = 3
self.niceness = check_nice(1)
self.use_pool=False
self.use_scoop=True
self.url = None
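    # Added note (hedged): SCOOP-backed runs like this test are normally
    # launched through the SCOOP bootstrap, e.g.
    #   python -m scoop envSCOOPdebug.py
    # rather than plain `python`, so that worker processes are available.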
@unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
def test_niceness(self):
pass | bsd-3-clause |
skdaccess/skdaccess | skdaccess/geo/gldas/data_fetcher.py | 2 | 5690 | # The MIT License (MIT)
# Copyright (c) 2016 Massachusetts Institute of Technology
#
# Authors: Victor Pankratius, Justin Li, Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# """@package GLDAS
# Provides classes for accessing GLDAS data.
# """
# mithagi required Base imports
from skdaccess.framework.data_class import DataFetcherStorage, TableWrapper
from skdaccess.utilities.grace_util import readTellusData, getStartEndDate
# Standard library imports
import os
from ftplib import FTP
import re
from collections import OrderedDict
# 3rd party package imports
import pandas as pd
import numpy as np
class DataFetcher(DataFetcherStorage):
''' Data Fetcher for GLDAS data '''
def __init__(self, ap_paramList, start_date = None, end_date = None, resample = False):
'''
Construct a GLDAS Data Fetcher
@param ap_paramList[geo_point]: Autolist of Geographic location tuples
@param start_date: Beginning date
@param end_date: Ending date
        @param resample: Resample the data to daily resolution, leaving NaNs in days without data (Default False)
'''
self.start_date = start_date
self.end_date = end_date
self.resample = resample
super(DataFetcher, self).__init__(ap_paramList)
def output(self):
'''
Create data wrapper of GLDAS data for specified geopoint.
@return GLDAS Data Wrapper
'''
data_file = DataFetcher.getDataLocation('gldas')
if data_file is None:
print("No data available")
return None
geo_point_list = self.ap_paramList[0]()
gldas_data_name = 'Equivalent Water Thickness (cm)'
full_data, metadata = readTellusData(data_file, geo_point_list, 'Latitude','Longitude',
'Water_Thickness', gldas_data_name, 'Time')[:2]
# Get appropriate time range
if self.start_date == None or self.end_date == None:
start_date, end_date = getStartEndDate(full_data)
        if self.start_date != None:
            start_date = self.start_date
            if type(start_date) == str:
                start_date = pd.to_datetime(start_date)
        if self.end_date != None:
            end_date = self.end_date
            if type(end_date) == str:
                end_date = pd.to_datetime(end_date)
for label in full_data.keys():
full_data[label] = full_data[label][start_date:end_date]
gldas_unc = pd.Series(np.ones(len(full_data[label]),dtype=np.float) * np.nan, index=full_data[label].index,name="Uncertainty")
full_data[label] = pd.concat([full_data[label], gldas_unc], axis=1)
if self.resample == True:
full_data[label] = full_data[label].reindex(pd.date_range(start_date, end_date))
return(TableWrapper(full_data, default_columns = ['Equivalent Water Thickness (cm)'],
default_error_columns=['Uncertainty']))
@classmethod
def downloadFullDataset(cls, out_file=None, use_file=None):
'''
Download GLDAS data
@param out_file: Output filename for parsed data
@param use_file: Directory of downloaded data. If None, data will be downloaded.
@return Absolute path of parsed data
'''
# No post processing for this data is necessary. If local data is
# specified, just set its location.
if use_file != None:
print('Setting data location for local data')
return os.path.abspath(use_file)
# If no local data, download data from server
print("Downloading GLDAS Land Mass Data")
ftp = FTP("podaac-ftp.jpl.nasa.gov")
ftp.login()
ftp.cwd('allData/tellus/L3/gldas_monthly/netcdf/')
dir_list = list(ftp.nlst(''))
file_list = [file for file in dir_list if re.search('.nc$', file)]
if len(file_list) > 1:
raise ValueError('Too many files found in GLDAS directory')
if out_file == None:
out_file = file_list[0]
ftp.retrbinary('RETR ' + file_list[0], open(''+out_file, 'wb').write)
cls.setDataLocation('gldas', os.path.abspath(file_list[0]))
def __str__(self):
'''
String representation of data fetcher
@return String listing the name and geopoint of data fetcher
'''
return 'GLDAS Data Fetcher' + super(DataFetcher, self).__str__()
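# --- Hedged usage sketch (added for illustration; not part of the original
# module). The skdaccess framework normally supplies `ap_paramList` as
# auto-parameters; here a plain callable returning (lat, lon) tuples stands in
# for that, which is an assumption about what `self.ap_paramList[0]()` expects.
if __name__ == '__main__':
    example_geo_points = lambda: [(35.0, -106.6)]  # hypothetical location
    fetcher = DataFetcher([example_geo_points],
                          start_date='2003-01-01',
                          end_date='2010-12-31',
                          resample=True)
    print(fetcher)
    # fetcher.output() would return a TableWrapper once the GLDAS data
    # location has been configured (e.g. via downloadFullDataset).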
| mit |
krischer/mtspec | doc/snippets/figure_1.py | 1 | 1050 | import matplotlib.pyplot as plt
plt.style.use("ggplot")
import numpy as np
from mtspec import mtspec
from mtspec.util import _load_mtdata
data = _load_mtdata('v22_174_series.dat.gz')
# Calculate the spectral estimation.
spec, freq, jackknife, _, _ = mtspec(
data=data, delta=4930.0, time_bandwidth=3.5,
number_of_tapers=5, nfft=312, statistics=True)
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
# Plot in thousands of years.
ax1.plot(np.arange(len(data)) * 4.930, data, color='black')
ax1.set_xlim(0, 800)
ax1.set_ylim(-1.0, 1.0)
ax1.set_xlabel("Time [1000 years]")
ax1.set_ylabel("Change in $\delta^{18}O$")
ax2 = fig.add_subplot(2, 1, 2)
ax2.set_yscale('log')
# Convert frequency to Ma.
freq *= 1E6
ax2.plot(freq, spec, color='black')
ax2.fill_between(freq, jackknife[:, 0], jackknife[:, 1],
color="red", alpha=0.3)
ax2.set_xlim(freq[0], freq[-1])
ax2.set_ylim(0.1E1, 1E5)
ax2.set_xlabel("Frequency [c Ma$^{-1}]$")
ax2.set_ylabel("Power Spectral Density ($\delta^{18}O/ca^{-1}$)")
plt.tight_layout()
plt.show()
| gpl-3.0 |
fro391/Investing | YQL/test.py | 1 | 2679 | import pandas as pd
from time import gmtime, strftime
def dataSlicing():
#analyzing with pandas
VPN = pd.read_csv('data\VPN'+'.csv').dropna()
VPN['lastTradeDate'] = pd.to_datetime(VPN['lastTradeDate']).apply(lambda x: x.strftime('%Y%m%d'))
#new columns being created
VPN['Ticker&Date'] = VPN['symbol']+ VPN['lastTradeDate']
VPN['VF%']=VPN['volume']/VPN['float']
VPN['AvgV%']= VPN['volume']/VPN['averageDailyV']
# % increase in price
VPN['priceChange'] = (VPN['lastTrade']-VPN['open'])/VPN['open']
# 50MA%
VPN['50MA%'] = (VPN['lastTrade']-VPN['50DayMA'])/VPN['50DayMA']
#new df
VPN = VPN[['Ticker&Date','EPS','AvgV%','VF%','priceChange','50MA%','float']]
#df for yahoo news counts
NDY = pd.read_csv('C:\Users\Richard\Desktop\Python\Investing\ArticleScrape\data\NewsDateHist.csv')
#df for google news counts
NDG = pd.read_csv('C:\Users\Richard\Desktop\Python\Investing\ArticleScrape\data\NewsDateGooG.csv')
#df for iChart stats
iChart = pd.read_csv('C:\Users\Richard\Desktop\Python\Investing\iChart\iChart.csv')
#merge file for google and yahoo news, maxed to remove duplicates
GYNews = NDY.append(NDG).groupby('Ticker&Date').max()
GYNews.to_csv('C:\Users\Richard\Desktop\Python\Investing\ArticleScrape\data\GoogYahooNewsCount.csv')
#looking up for news count on days of high volume
YahooCount = VPN.merge(NDY, on = 'Ticker&Date', how = 'left')
GoogleCount = VPN.merge(NDG, on = 'Ticker&Date', how = 'left')
#if news count is zero, show zero instead of null
YahooCount.replace('NaN',0,inplace = True)
GoogleCount.replace('NaN',0,inplace = True)
#joining with iChart Data
YahooCount = YahooCount.merge(iChart, on = 'Ticker&Date', how = 'left')
GoogleCount = GoogleCount.merge(iChart, on = 'Ticker&Date', how = 'left')
#take the date where the most number of news comes from either yahoo or google
VPNout = YahooCount.append(GoogleCount).groupby('Ticker&Date').max()
#filtering out columns
#VPNout = VPNout.ix[VPNout['EPS']>-2,['Ticker&Date','EPS','AvgV%','VF%','priceChange','50MA%','Title']]
#VPNout = VPNout.ix[VPNout['EPS']<15,['Ticker&Date','EPS','AvgV%','VF%','priceChange','50MA%','Title']]
#VPNout = VPNout.ix[VPNout['AvgV%']>2,['Ticker&Date','EPS','AvgV%','VF%','priceChange','50MA%','Title']]
#VPNout = VPNout.ix[VPNout['priceChange']>0,['Ticker&Date','EPS','AvgV%','VF%','priceChange','50MA%','Title']]
#VPNout = VPNout.ix[VPNout['50MA%']>0.05,['Ticker&Date','EPS','AvgV%','VF%','priceChange','50MA%','Title']]
#Saving to file
VPNout.to_csv('data\VPNoutput'+strftime("%Y-%m-%d", gmtime())+'.csv')
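# Added note: a minimal way to run this module as a script, assuming the local
# 'data\VPN.csv', the ArticleScrape news-count CSVs and iChart.csv referenced
# above already exist at those hard-coded paths.
if __name__ == '__main__':
    dataSlicing()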
| gpl-2.0 |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py | 69 | 50262 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import gettempdir
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
import numpy as npy
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
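# Added worked example (for clarity): parentheses and quotes must be escaped
# before being embedded in a PostScript string literal, e.g.
#   quote_ps_string("Temp (C)") == r"Temp \(C\)"
#   quote_ps_string("it's") == r"it\251s"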
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or numerix arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return npy.alltrue(npy.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate high
        res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
if rcParams['path.simplify']:
self.simplify = (width * imagedpi, height * imagedpi)
else:
self.simplify = None
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self.hatch = None
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def set_hatch(self, hatch):
"""
hatch can be one of:
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
X - crossed diagonal
letters can be combined, in which case all the specified
hatchings are done
if same letter repeats, it increases the density of hatching
in that direction
"""
hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0}
for letter in hatch:
if (letter == '/'): hatches['diag2'] += 1
elif (letter == '\\'): hatches['diag1'] += 1
elif (letter == '|'): hatches['vert'] += 1
elif (letter == '-'): hatches['horiz'] += 1
elif (letter == '+'):
hatches['horiz'] += 1
hatches['vert'] += 1
elif (letter.lower() == 'x'):
hatches['diag1'] += 1
hatches['diag2'] += 1
def do_hatch(angle, density):
if (density == 0): return ""
return """\
gsave
eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth
/hatchgap %d def
pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def
hatchl cvi hatchgap idiv hatchgap mul
hatchgap
hatchr cvi hatchgap idiv hatchgap mul
{hatcht m 0 hatchb hatcht sub r }
for
stroke
grestore
""" % (angle, 12/density)
self._pswriter.write("gsave\n")
self._pswriter.write(do_hatch(90, hatches['horiz']))
self._pswriter.write(do_hatch(0, hatches['vert']))
self._pswriter.write(do_hatch(45, hatches['diag1']))
self._pswriter.write(do_hatch(-45, hatches['diag2']))
self._pswriter.write("grestore\n")
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
l,b,r,t = texmanager.get_ps_bbox(s, fontsize)
w = (r-l)
h = (t-b)
# TODO: We need a way to get a good baseline from
# text.usetex
return w, h, 0
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm')
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(findfont(prop, fontext='afm')))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
im.flipud_out()
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
hexlines = '\n'.join(self._hex_lines(bits))
xscale, yscale = (
w/self.image_magnification, h/self.image_magnification)
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, simplify=None):
path = transform.transform_path(path)
ps = []
last_points = None
for points, code in path.iter_segments(simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
last_points = points
ps = "\n".join(ps)
return ps
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
ps = self._convert_path(path, transform, self.simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
ps_cmd.append(self._convert_path(marker_path, marker_trans))
if rgbFace:
ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore'])
ps_cmd.extend(['stroke', 'grestore', '} bind def'])
tpath = trans.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
corr = 0#w/2*(fontsize-10)/10
pos = _nums_to_str(x-corr, y)
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif isinstance(s, unicode):
return self.draw_unicode(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
font = self._get_font_afm(prop)
l,b,w,h = font.get_str_bbox(s)
fontsize = prop.get_size_in_points()
l *= 0.001*fontsize
b *= 0.001*fontsize
w *= 0.001*fontsize
h *= 0.001*fontsize
if angle==90: l,b = -b, l # todo generalize for arb rotations
pos = _nums_to_str(x-l, y-b)
thetext = '(%s)' % s
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
rotate = '%1.1f rotate' % angle
setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3]
#h = 0
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(pos)s moveto
%(rotate)s
%(thetext)s
%(setcolor)s
show
grestore
""" % locals()
self._draw_ps(ps, gc, None)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
write("%s m\n"%_nums_to_str(x,y))
if angle:
write("gsave\n")
write("%s rotate\n"%_num_to_str(angle))
descent = font.get_descent() / 64.0
if descent:
write("0 %s rmoveto\n"%_num_to_str(descent))
write("(%s) show\n"%quote_ps_string(s))
if angle:
write("grestore\n")
def new_gc(self):
return GraphicsContextPS()
def draw_unicode(self, gc, x, y, s, prop, angle):
"""draw a unicode string. ps doesn't have unicode support, so
we have to do this the hard way
"""
if rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
        Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\ngrestore\n")
else:
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
hatch = gc.get_hatch()
if hatch:
self.set_hatch(hatch)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.get("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.get("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.get("dpi", 72)
facecolor = kwargs.get("facecolor", "w")
edgecolor = kwargs.get("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
elif is_writable_file_like(outfile):
title = None
tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fh = file(tmpfile, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
if not rcParams['ps.useafm']:
for font_filename, chars in renderer.used_characters.values():
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fonttype = rcParams['ps.fonttype']
convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = file(tmpfile)
print >>outfile, fh.read()
else:
shutil.move(tmpfile, outfile)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
fh = file(tmpfile, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
convert_psfrags(tmpfile, renderer.psfrag, font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else: shutil.move(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
# the split drive part of the command is necessary for windows users with
# multiple
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
paper = '-sPAPERSIZE=%s'% ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
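# Added note: this code path is selected from _print_figure when the rc setting
# ps.usedistiller is 'ghostscript'; a sketch of enabling it (not part of the
# original file) would be:
#   from matplotlib import rcParams
#   rcParams['ps.usedistiller'] = 'ghostscript'
#   rcParams['ps.distiller.res'] = 6000
# Setting it to 'xpdf' selects xpdf_distill below instead.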
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \
(ptype, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox):
"""
Convert the postscript to encapsulated postscript.
"""
bbox_info = get_bbox(tmpfile, bbox)
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
print >>epsh, '%%Page 1 1'
break
elif line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%Pages'):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
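# Illustrative note (added; not part of the original backend): with mpldict in
# effect, emitted PostScript uses the abbreviations defined above, e.g.
#   10 20 m 30 40 l stroke
# moves to (10, 20) and strokes a line to (30, 40), while
#   100 50 0 0 clipbox
# restricts subsequent drawing to a 100x50 box anchored at the origin.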
| agpl-3.0 |
trondkr/romstools | TimeseriesAndDomainAnalysis/createAveragesOnHexagon_part2.py | 1 | 5818 | import paramiko
import os
import pandas as pd
import datetime as datetime
__author__ = 'Trond Kristiansen'
__email__ = 'me (at) trondkristiansen.com'
__created__ = datetime.datetime(2014, 2, 11)
__modified__ = datetime.datetime(2014, 3, 17)
__version__ = "1.0.0"
__status__ = "Production"
"""Start EDIT"""
myvars=['temp','salt']
myprefix='regscen_hindcast'
apattern = 'AA_10km_z.nc_*'
outfile2 = 'allfiles.nc'
outfile = 'allfiles_region.nc'
myhostname="hexagon.bccs.uib.no"
myusername="trondk"
mypassword="passord"
remotedir="/work/users/trondk/REGSCEN/"
remoteSTORAGEdir="/work/jonal/AA_10km/monthly/"
localdir="/Users/trondkr/Projects/RegScen/Analysis/"
first=False
"""
This script calculates the domain averages of salinity and temperature at all depth
levels available. These timeseries will be weighted later to calculate the domain averaged
monthly fields within depth layers. In addition, timeseries and domain averages of SSH and SHFLUX
are calculated together with the trend of SSH (zeta).
Calculations done:
- Domain averages of salinity and temperature at all depth levels
- timeseries and domain averages of zeta and shflux
- trend of zeta (sea level height)
"""
alldepths=[0, 5, 10, 20, 30, 50, 75, 100, 150, 200, 250, 300, 400, 500,
600, 700, 800, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000]
"""Stop EDIT"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(myhostname,username=myusername,password=mypassword)
# Prepare the environment
command = 'cd %s'%(remotedir)
stdin, stdout, stderr = ssh.exec_command(command)
ftp = ssh.open_sftp()
command = 'find %s -name %s'%(remoteSTORAGEdir,apattern)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
filelist = stdout.read().splitlines()
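# Illustrative note (added comment): with the EDIT values above, the
# per-variable extraction command composed in the loop below expands to e.g.
#   cdo -select,name=temp /work/jonal/AA_10km/monthly/AA_10km_z.nc_* /work/users/trondk/REGSCEN/allfiles_region.nc
# and is executed on the remote host through the paramiko SSH session.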
for myvar in myvars:
# Extract all data of variable 'myvar'
command = 'cdo -select,name=%s %s%s %s%s '%(myvar,remoteSTORAGEdir,apattern,remotedir,outfile)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
for mylevel in alldepths:
mylevel=str(mylevel)
print "Extracting data for depth level: %s"%(mylevel)
# Extract data at separate depth levels of variable 'myvar'
outfileLevel = str(myprefix)+'_'+str(myvar)+'_depth_'+str(mylevel)+'.nc'
command = 'cdo sellevel,%s %s%s %s%s'%(mylevel,remotedir,outfile,remotedir,outfileLevel)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
# Calculate domain average at separate depth levels of variable 'myvar'
outfileAverageLevel = str(myprefix)+'_fldmean_'+str(myvar)+'_depth_'+str(mylevel)+'.nc'
command = 'cdo fldmean %s%s %s%s'%(remotedir,outfileLevel,remotedir,outfileAverageLevel)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
remotefile=remotedir+outfileAverageLevel
print "Calculations done and downloading file: %s"%(remotefile)
localfile=localdir+outfileAverageLevel
ftp.get(remotefile, localfile)
print "\n"
# Calculate trend of SLA
# Extract all data of variable zeta
command = 'cdo -sellonlatbox,-30,15,60,90 -select,name=zeta %s%s %s%s'%(remoteSTORAGEdir,apattern,remotedir,outfile)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
outfileAverageLevelB = str(myprefix)+'_trend_zeta_trend.nc'
outfileAverageLevelA = str(myprefix)+'_trend_zeta_intercept.nc'
command = 'cdo trend %s%s %s%s %s%s'%(remotedir,outfile,remotedir,outfileAverageLevelA,remotedir,outfileAverageLevelB)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
remotefile=remotedir+outfileAverageLevelB
print "ZETA Calculations done and downloading file: %s"%(remotefile)
localfile=localdir+outfileAverageLevelB
ftp.get(remotefile, localfile)
print "\n"
# Calculate domain average of ZETA
outfileAverage = str(myprefix)+'_fldmean_zeta.nc'
command = 'cdo fldmean %s%s %s%s'%(remotedir,outfile,remotedir,outfileAverage)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
remotefile=remotedir+outfileAverage
print "Calculations done and downloading file: %s"%(remotefile)
localfile=localdir+outfileAverage
ftp.get(remotefile, localfile)
print "\n"
# SHFLUX
# Extract all data of variable shflux
command = 'cdo -sellonlatbox,-30,15,60,90 -select,name=shflux %s%s %s%s'%(remoteSTORAGEdir,apattern,remotedir,outfile)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
# Calculate domain average of shflux - timeseries
outfileAverage = str(myprefix)+'_fldmean_shflux.nc'
command = 'cdo fldmean %s%s %s%s'%(remotedir,outfile,remotedir,outfileAverage)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
remotefile=remotedir+outfileAverage
localfile=localdir+outfileAverage
ftp.get(remotefile, localfile)
print "\n"
# Calculate time average of shflux as an area map
outfileAverage = str(myprefix)+'_timmean_shflux.nc'
command = 'cdo timmean %s%s %s%s'%(remotedir,outfile,remotedir,outfileAverage)
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
remotefile=remotedir+outfileAverage
print "SHFLUX Calculations done and downloading file: %s"%(remotefile)
localfile=localdir+outfileAverage
ftp.get(remotefile, localfile)
print "\n"
ftp.close()
ssh.close()
print "Program finished" | mit |
jmetzen/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
terkkila/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
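# Illustrative note (added comment, not part of the original file): during
# development the extensions declared above are typically compiled in place
# with something like
#   python setup.py build_ext --inplace
# run from the scikit-learn source root.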
| bsd-3-clause |
vshtanko/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
This results from the bound for the all the Lasso that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
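# Worked example (added comment): for emp_cov = np.array([[2.0, 0.3],
#                                                         [0.3, 1.0]]),
# the diagonal is zeroed and alpha_max returns 0.3 -- the largest
# off-diagonal magnitude, which per the docstring above is the largest
# alpha still giving non-zero off-diagonal coefficients.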
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows up. Besides, this takes a
# conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
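# Hedged usage sketch (added; illustrative only, alpha=0.05 is an arbitrary
# choice): using empirical_covariance imported at the top of this module,
#
#     emp_cov = empirical_covariance(X)          # X: (n_samples, n_features)
#     covariance, precision = graph_lasso(emp_cov, alpha=0.05)
#
# GraphLassoCV below chooses the penalty by cross-validation instead.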
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
plotted at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
# Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
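# Hedged usage sketch (added; illustrative only):
#
#     cv_model = GraphLassoCV().fit(X)
#     cv_model.alpha_       # penalty selected by cross-validation
#     cv_model.cv_alphas_   # all penalties explored on the refined grids
#     cv_model.precision_   # sparse precision refit at the selected alpha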
| bsd-3-clause |
andimarafioti/AIAMI | Evaluation/testC-gamma.py | 1 | 2960 | # -*- coding: utf-8 -*-
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa -- registers the '3d' projection used by fig.gca(projection='3d')
from utils.FScore import F1Score
from Identification.LoadDescriptors import loadAllDescriptors
from Identification.PreprocessingDescriptors import preprocessDescriptors
from Identification.TrainCvTest import separateDatabases
Descriptors = loadAllDescriptors(reverbs=False)
normalized_features, yClass, features_names = preprocessDescriptors(Descriptors)
del Descriptors # Ya no lo voy a utilizar
normalizedTrain, yTrain, normalizedCV, yCV, normalizedTest, yTest = separateDatabases(normalized_features, yClass)
C = np.linspace(0.5, 2.5, 10)
Gamma = np.linspace(1e-2, 0.2, 10)
def test_c_gamma(training_features, training_classes, test_features, test_classes, c, gamma):
f_cv = []
f_train = []
for iii in c:
for jjj in gamma:
clf = svm.SVC(C=iii, gamma=jjj, cache_size=1000)
clf.fit(training_features, training_classes)
f_train = np.append(f_train, np.mean(F1Score(training_features, training_classes, clf).values()))
f_cv = np.append(f_cv, np.mean(F1Score(test_features, test_classes, clf).values()))
print iii
f_cv_matrix = np.reshape(f_cv, (len(C), len(gamma)))
f_train_matrix = np.reshape(f_train, (len(C), len(gamma)))
return f_train_matrix, f_cv_matrix
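# Note (added comment): the grid search above trains one SVC per (C, gamma)
# pair, i.e. len(c) * len(gamma) fits -- 10 x 10 = 100 with the C and Gamma
# grids defined above.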
F1TrainScoreMatrix, F1CVScoreMatrix = test_c_gamma(training_features=normalizedTrain, training_classes=yTrain,
test_features=normalizedCV, test_classes=yCV, c=C, gamma=Gamma)
first_dimension = np.array([C] * F1TrainScoreMatrix.shape[1]).T
second_dimension = np.array([Gamma] * F1TrainScoreMatrix.shape[0])
fig = plt.figure()
ax = fig.gca(projection='3d')
jet = plt.get_cmap('jet')
# surf = ax.plot_surface(second_dimension, first_dimension, F1CVScoreMatrix,
# rstride=1, cstride=1, cmap=jet, linewidth=0, antialiased=False)
surf = ax.plot_surface(second_dimension, first_dimension, F1CVScoreMatrix,
rstride=1, cstride=1, cmap=jet, linewidth=0, antialiased=False)
ax.set_zlim3d(0, 1)
#title = unicode('Evaluación de los parámetros de\nregularización C y $\Gamma$ sobre la base de\nvalidación cruzada','utf-8')
#ax.set_title(title, fontsize=20)
ax.set_xlabel('$\Gamma$', fontsize=20)
ax.set_ylabel('C', fontsize=20)
plt.show()
# fig = plt.figure()
# ax = plt.plot(first_dimension[:, 1], F1CVScoreMatrix[:, 1], 'r', first_dimension[:, 1], F1TrainScoreMatrix[:, 1], 'b')
# plt.xlabel('C', fontsize=30)
# plt.text(1.9, 0.95, 'Medida-F en la base\nde entrenamiento',
# color='blue', fontsize=25)
# text = unicode('Medida-F en la base\nde validacion cruzada', 'utf-8')
# plt.text(1.9, 0.9, text,
# color='red', fontsize=25)
# plt.tick_params(axis='both', which='major', labelsize=15)
# plt.tick_params(axis='both', which='minor', labelsize=15)
# plt.setp(ax, linewidth=5.0)
# plt.show()
| mit |
gfyoung/pandas | pandas/core/flags.py | 6 | 3567 | import weakref
class Flags:
"""
Flags that apply to pandas objects.
.. versionadded:: 1.2.0
Parameters
----------
obj : Series or DataFrame
The object these flags are associated with.
allows_duplicate_labels : bool, default True
Whether to allow duplicate labels in this object. By default,
duplicate labels are permitted. Setting this to ``False`` will
cause an :class:`errors.DuplicateLabelError` to be raised when
`index` (or columns for DataFrame) is not unique, or any
subsequent operation introduces duplicates.
See :ref:`duplicates.disallow` for more.
.. warning::
This is an experimental feature. Currently, many methods fail to
propagate the ``allows_duplicate_labels`` value. In future versions
it is expected that every method taking or returning one or more
DataFrame or Series objects will propagate ``allows_duplicate_labels``.
Notes
-----
Attributes can be set in two ways
>>> df = pd.DataFrame()
>>> df.flags
<Flags(allows_duplicate_labels=True)>
>>> df.flags.allows_duplicate_labels = False
>>> df.flags
<Flags(allows_duplicate_labels=False)>
>>> df.flags['allows_duplicate_labels'] = True
>>> df.flags
<Flags(allows_duplicate_labels=True)>
"""
_keys = {"allows_duplicate_labels"}
def __init__(self, obj, *, allows_duplicate_labels):
self._allows_duplicate_labels = allows_duplicate_labels
self._obj = weakref.ref(obj)
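        # Added explanatory comment: only a weak reference to the owning
        # Series/DataFrame is kept, so the flags object does not keep its
        # owner alive or create a reference cycle; the setter below checks
        # whether the referent still exists before validating uniqueness.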
@property
def allows_duplicate_labels(self) -> bool:
"""
Whether this object allows duplicate labels.
Setting ``allows_duplicate_labels=False`` ensures that the
index (and columns of a DataFrame) are unique. Most methods
that accept and return a Series or DataFrame will propagate
the value of ``allows_duplicate_labels``.
See :ref:`duplicates` for more.
See Also
--------
DataFrame.attrs : Set global metadata on this object.
DataFrame.set_flags : Set global flags on this object.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a'])
>>> df.allows_duplicate_labels
True
>>> df.allows_duplicate_labels = False
Traceback (most recent call last):
...
pandas.errors.DuplicateLabelError: Index has duplicates.
positions
label
a [0, 1]
"""
return self._allows_duplicate_labels
@allows_duplicate_labels.setter
def allows_duplicate_labels(self, value: bool):
value = bool(value)
obj = self._obj()
if obj is None:
raise ValueError("This flag's object has been deleted.")
if not value:
for ax in obj.axes:
ax._maybe_check_unique()
self._allows_duplicate_labels = value
def __getitem__(self, key):
if key not in self._keys:
raise KeyError(key)
return getattr(self, key)
def __setitem__(self, key, value):
if key not in self._keys:
raise ValueError(f"Unknown flag {key}. Must be one of {self._keys}")
setattr(self, key, value)
def __repr__(self):
return f"<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>"
def __eq__(self, other):
if isinstance(other, type(self)):
return self.allows_duplicate_labels == other.allows_duplicate_labels
return False
| bsd-3-clause |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py | 69 | 104273 | """
Numerical python functions written for compatability with matlab(TM)
commands with the same names.
Matlab(TM) compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
Cross spectral density uing Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (power spectral density over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in matlab(TM), but are useful anyway:
:meth:`cohere_pairs`
Coherence over all pairs. This is not a matlab function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:meth:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
record array helper functions
-------------------------------
A collection of helper methods for numpyrecord arrays
.. _htmlonly::
See :ref:`misc-examples-index`
:meth:`rec2txt`
pretty print a record array
:meth:`rec2csv`
store record array in CSV file
:meth:`csv2rec`
import record array from CSV file with type inspection
:meth:`rec_append_fields`
adds field(s)/array(s) to record array
:meth:`rec_drop_fields`
drop fields from record array
:meth:`rec_join`
join two record arrays on sequence of fields
:meth:`rec_groupby`
summarize data by groups (similar to SQL GROUP BY)
:meth:`rec_summarize`
helper code to filter rec array fields into new fields
For the rec viewer functions (e.g. rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:meth:`conv`
convolution (numpy.convolve)
:meth:`corrcoef`
The matrix of correlation coefficients
:meth:`hist`
Histogram (numpy.histogram)
:meth:`linspace`
Linear spaced array from min to max
:meth:`load`
load ASCII file - use numpy.loadtxt
:meth:`meshgrid`
Make a 2D grid from two 1-D arrays (numpy.meshgrid)
:meth:`polyfit`
least squares best polynomial fit of x to y (numpy.polyfit)
:meth:`polyval`
evaluate a vector for a vector of polynomial coeffs (numpy.polyval)
:meth:`save`
save ASCII file - use numpy.savetxt
:meth:`trapz`
trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))
:meth:`vander`
the Vandermonde matrix (numpy.vander)
"""
from __future__ import division
import csv, warnings, copy, os
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook
# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
set
except NameError:
from sets import Set as set
def linspace(*args, **kw):
warnings.warn("use numpy.linspace", DeprecationWarning)
return np.linspace(*args, **kw)
def meshgrid(x,y):
warnings.warn("use numpy.meshgrid", DeprecationWarning)
return np.meshgrid(x,y)
def mean(x, dim=None):
warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
if len(x)==0: return None
return np.mean(x, axis=dim)
def logspace(xmin,xmax,N):
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
"return sqrt(x dot x)"
return np.sqrt(np.dot(x,x))
def window_hanning(x):
"return x times the hanning window of len(x)"
return np.hanning(len(x))*x
def window_none(x):
"No window function; simply return x"
return x
#from numpy import convolve as conv
def conv(x, y, mode=2):
'convolve x with y'
warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
return np.convolve(x,y,mode)
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
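    # Added comment: the least-squares slope is b = cov(x, y) / var(x) and the
    # intercept follows from the sample means, which is what the two lines
    # below compute from the covariance matrix C.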
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram. It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None):
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x)<NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y)<NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if scale_by_freq is None:
warnings.warn("psd, csd, and specgram have changed to scale their "
"densities by the sampling frequency for better MatLab "
"compatibility. You can pass scale_by_freq=False to disable "
"this behavior. Also, one-sided densities are scaled by a "
"factor of 2.")
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
numFreqs = pad_to
scaling_factor = 1.
elif sides in ('default', 'onesided'):
numFreqs = pad_to//2 + 1
scaling_factor = 2.
else:
raise ValueError("sides must be one of: 'default', 'onesided', or "
"'twosided'")
# Matlab divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
scaling_factor /= Fs
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
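    # Added comment: ind holds the start index of each (possibly overlapping)
    # NFFT-point segment; consecutive segments advance by NFFT - noverlap
    # samples, as in Welch's averaged-periodogram method.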
n = len(ind)
Pxy = np.zeros((numFreqs,n), np.complex_)
# do the ffts of the slices
for i in range(n):
thisX = x[ind[i]:ind[i]+NFFT]
thisX = windowVals * detrend(thisX)
fx = np.fft.fft(thisX, n=pad_to)
if same_data:
fy = fx
else:
thisY = y[ind[i]:ind[i]+NFFT]
thisY = windowVals * detrend(thisY)
fy = np.fft.fft(thisY, n=pad_to)
Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2. Also include
# scaling factors for one-sided densities and dividing by the sampling
# frequency, if desired.
Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
t = 1./Fs * (ind + NFFT / 2.)
freqs = float(Fs) / pad_to * np.arange(numFreqs)
return Pxy, freqs, t
#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] ="""
Keyword arguments:
*NFFT*: integer
The number of data points used in each block for the FFT.
Must be even; a power of 2 is most efficient. The default value is 256.
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*detrend*: callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
matlab, where the *detrend* parameter is a vector, in
matplotlib is it a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the psd (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the PSD to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided PSD,
while 'twosided' forces two-sided.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MatLab compatibility.
"""
def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The power spectral density by Welch's average periodogram method.
The vector *x* is divided into *NFFT* length blocks. Each block
is detrended by the function *detrend* and windowed by the function
*window*. *noverlap* gives the length of the overlap between blocks.
The absolute(fft(block))**2 of each segment are averaged to compute
*Pxx*, with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxx*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
return Pxx.real,freqs
psd.__doc__ = psd.__doc__ % kwdocd
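# Hedged usage sketch (added; the signal parameters are illustrative
# assumptions, not from the original module):
#
#     t = np.arange(0, 1, 1.0 / 1000)
#     x = np.sin(2 * np.pi * 120 * t) + np.random.randn(len(t))
#     Pxx, freqs = psd(x, NFFT=256, Fs=1000)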
def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The cross power spectral density by Welch's average periodogram
method. The vectors *x* and *y* are divided into *NFFT* length
blocks. Each block is detrended by the function *detrend* and
windowed by the function *window*. *noverlap* gives the length
of the overlap between blocks. The product of the direct FFTs
of *x* and *y* are averaged over each segment to compute *Pxy*,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*
Array or sequence containing the data
%(PSD)s
Returns the tuple (*Pxy*, *freqs*).
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
"""
Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
Pxy = Pxy.mean(axis=1)
return Pxy, freqs
csd.__doc__ = csd.__doc__ % kwdocd
def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
"""
Compute a spectrogram of data in *x*. Data are split into *NFFT*
length segements and the PSD of each section is computed. The
windowing function *window* is applied to each segment, and the
amount of overlap of each segment is specified with *noverlap*.
If *x* is real (i.e. non-complex) only the spectrum of the positive
frequencies is returned. If *x* is complex then the complete
spectrum is returned.
%(PSD)s
Returns a tuple (*Pxx*, *freqs*, *t*):
- *Pxx*: 2-D array, columns are the periodograms of
successive segments
- *freqs*: 1-D array of frequencies corresponding to the rows
in Pxx
- *t*: 1-D array of times corresponding to midpoints of
segments.
.. seealso::
:func:`psd`:
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
"""
assert(NFFT > noverlap)
Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
noverlap, pad_to, sides, scale_by_freq)
Pxx = Pxx.real #Needed since helper implements generically
if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[NFFT/2:]-Fs,freqs[:NFFT/2]))
Pxx = np.concatenate((Pxx[NFFT/2:,:],Pxx[:NFFT/2,:]),0)
return Pxx, freqs, t
specgram.__doc__ = specgram.__doc__ % kwdocd
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(PSD)s
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect, since
the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`:
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x)<2*NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
cohere.__doc__ = cohere.__doc__ % kwdocd
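# Example usage (illustrative sketch): coherence between two noisy channels
# that share a common 10 Hz component, sampled at 1 kHz. The data are made up.
#
#     t = np.arange(0.0, 4.0, 1.0/1000)
#     s = np.sin(2*np.pi*10*t)
#     x = s + np.random.randn(len(t))
#     y = s + np.random.randn(len(t))
#     Cxy, f = cohere(x, y, NFFT=256, Fs=1000)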
def corrcoef(*args):
"""
corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
coefficients for the columns of *X*
corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
correlation coefficients for *x* and *y*.
Numpy arrays can be real or complex.
The correlation matrix is defined from the covariance matrix *C*
as
.. math::
r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
"""
warnings.warn("Use numpy.corrcoef", DeprecationWarning)
kw = dict(rowvar=False)
return np.corrcoef(*args, **kw)
def polyfit(*args, **kwargs):
u"""
polyfit(*x*, *y*, *N*)
Do a best fit polynomial of order *N* of *y* to *x*. Return value
is a vector of polynomial coefficients [pk ... p1 p0]. Eg, for
*N*=2::
      p2*x0^2 + p1*x0 + p0 = y0
      p2*x1^2 + p1*x1 + p0 = y1
p2*x2^2 + p1*x2 + p0 = y2
.....
p2*xk^2 + p1*xk + p0 = yk
Method: if *X* is a the Vandermonde Matrix computed from *x* (see
`vandermonds
<http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
polynomial least squares solution is given by the '*p*' in
X*p = y
where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
*p* is a *N*+1 length vector, and *y* is a (len(*x*)
\N{MULTIPLICATION SIGN} 1) vector.
This equation can be solved as
.. math::
      p = (X_t X)^{-1} X_t y
where :math:`X_t` is the transpose of *X* and -1 denotes the
inverse. Numerically, however, this is not a good method, so we
use :func:`numpy.linalg.lstsq`.
For more info, see `least squares fitting
<http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note the *k*'s and *n*'s in the superscripts and
    subscripts on that page. The linear algebra is correct, however.
.. seealso::
:func:`polyval`
"""
warnings.warn("use numpy.poyfit", DeprecationWarning)
return np.polyfit(*args, **kwargs)
def polyval(*args, **kwargs):
"""
*y* = polyval(*p*, *x*)
*p* is a vector of polynomial coeffients and *y* is the polynomial
evaluated at *x*.
Example code to remove a polynomial (quadratic) trend from y::
p = polyfit(x, y, 2)
trend = polyval(p, x)
resid = y - trend
.. seealso::
:func:`polyfit`
"""
warnings.warn("use numpy.polyval", DeprecationWarning)
return np.polyval(*args, **kwargs)
def vander(*args, **kwargs):
"""
*X* = vander(*x*, *N* = *None*)
The Vandermonde matrix of vector *x*. The *i*-th column of *X* is the
the *i*-th power of *x*. *N* is the maximum power to compute; if *N* is
*None* it defaults to len(*x*).
"""
warnings.warn("Use numpy.vander()", DeprecationWarning)
return np.vander(*args, **kwargs)
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
u"""
Cxy, Phase, freqs = cohere_pairs(X, ij, ...)
Compute the coherence for all pairs in *ij*. *X* is a
(*numSamples*, *numCols*) numpy array. *ij* is a list of tuples
(*i*, *j*). Each tuple is a pair of indexes into the columns of *X*
for which you want to compute coherence. For example, if *X* has 64
columns, and you want to compute all nonredundant pairs, define *ij*
as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i, j) )
The other function arguments, except for *preferSpeedOverMemory*
(see below), are explained in the help string of :func:`psd`.
Return value is a tuple (*Cxy*, *Phase*, *freqs*).
- *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector for that
pair. I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``. Number of
dictionary keys is ``len(ij)``.
- *Phase*: a dictionary of phases of the cross spectral density at
each frequency for each pair. The keys are ``(i,j)``.
- *freqs*: a vector of frequencies, equal in length to either
the coherence or phase vectors for any (*i*, *j*) key.. Eg,
to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If *N* is the
number of pairs, this function is O(N) for most of the heavy
lifting, whereas calling cohere for each pair is
O(N\N{SUPERSCRIPT TWO}). However, because of the caching, it is
also more memory intensive, making 2 additional complex arrays
with approximately the same number of elements as *X*.
The parameter *preferSpeedOverMemory*, if *False*, limits the
caching by only making one, rather than two, complex cache arrays.
This is useful if memory becomes critical. Even when
*preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is *True*. In my tests
with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7GHz Athlon with 512MB RAM compared with
*preferSpeedOverMemory* = *False*. But both solutions were more
    than 10x faster than naively crunching all possible pairs through
cohere.
.. seealso::
:file:`test/cohere_pairs_test.py` in the src tree:
For an example script that shows that this
:func:`cohere_pairs` and :func:`cohere` give the same
results for a given pair.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
seen = {}
for i,j in ij:
seen[i]=1; seen[j] = 1
allColumns = seen.keys()
Ncols = len(allColumns)
del seen
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
    # cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
        windowVals = window(np.ones((NFFT,), X.dtype))
ind = range(0, numRows-NFFT+1, NFFT-noverlap)
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
    normVal = np.linalg.norm(windowVals)**2
    for i, iCol in enumerate(allColumns):
        progressCallback(i/Ncols, 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)
Pxy = np.divide(Pxy, normVal)
Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
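# Example usage (illustrative sketch): coherence for all non-redundant column
# pairs of a multichannel array. X here is random stand-in data.
#
#     X = np.random.randn(4096, 8)
#     ij = [(i, j) for i in range(8) for j in range(i+1, 8)]
#     Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=1000)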
def entropy(y, bins):
r"""
Return the entropy of the data in *y*.
.. math::
      -\sum p_i \log_2(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n,bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1]-bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
#S = -1.0*np.sum(p*log(p))
return S
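# Example usage (illustrative sketch): entropy of Gaussian samples compared
# with the analytic value quoted in the docstring. mu and sigma are arbitrary.
#
#     mu, sigma = 0.0, 2.0
#     y = mu + sigma*np.random.randn(200000)
#     S = entropy(y, 100)
#     Sanalytic = 0.5*(1.0 + np.log(2*np.pi*sigma**2))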
def hist(y, bins=10, normed=0):
"""
Return the histogram of *y* with *bins* equally sized bins. If
bins is an array, use those bins. Return value is (*n*, *x*)
where *n* is the count for each bin in *x*.
If *normed* is *False*, return the counts in the first element of
the returned tuple. If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y) \\cdot \\mathrm{dbin}}`.
If *y* has rank > 1, it will be raveled. If *y* is masked, only the
unmasked values will be used.
Credits: the Numeric 22 documentation
"""
warnings.warn("Use numpy.histogram()", DeprecationWarning)
return np.histogram(y, bins=bins, range=None, normed=normed)
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def levypdf(x, gamma, alpha):
"Returm the levy pdf evaluated at *x* for params *gamma*, *alpha*"
N = len(x)
if N%2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
'x = np.linspace(minx, maxx, N), where N is even'
dx = x[1]-x[0]
f = 1/(N*dx)*np.arange(-N/2, N/2, np.float_)
ind = np.concatenate([np.arange(N/2, N, int),
np.arange(0, N/2, int)])
df = f[1]-f[0]
    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)
px = np.fft.fft(np.take(cfl,ind)*df).astype(np.float_)
return np.take(px, ind)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def trapz(x, y):
"""
Trapezoidal integral of *y*(*x*).
"""
warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)", DeprecationWarning)
return np.trapz(y, x)
#if len(x)!=len(y):
# raise ValueError, 'x and y must have the same length'
#if len(x)<2:
# raise ValueError, 'x and y must have > 1 element'
#return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
up = (dif == 1).nonzero()[0];
dn = (dif == -1).nonzero()[0];
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
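# Example usage (illustrative sketch): indices of the longest run of ones in a
# 0/1 vector; the vector is made up.
#
#     x = np.array([0, 1, 1, 0, 1, 1, 1, 0])
#     ind = longest_contiguous_ones(x)    # -> array([4, 5, 6])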
def prepca(P, frac=0):
"""
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the Matlab (TM)
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
If *p* is a scalar, the largest value of *x* less than or equal to
the *p* percentage point in the sequence is returned.
"""
x = np.array(x).ravel() # we need a copy
x.sort()
Nx = len(x)
if not cbook.iterable(p):
return x[int(p*Nx/100.0)]
p = np.asarray(p)* Nx/100.0
ind = p.astype(int)
ind = np.where(ind>=Nx, Nx-1, ind)
return x.take(ind)
def prctile_rank(x, p):
"""
Return the rank for each element in *x*, return the rank
0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a
len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
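# Example usage (illustrative sketch): quartiles of a sample and the quartile
# rank of each element; the data are arbitrary.
#
#     x = np.random.randn(1000)
#     q = prctile(x, (25, 50, 75))
#     ranks = prctile_rank(x, (25, 50, 75))    # values in 0..3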
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
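# Example usage (illustrative sketch): evaluate the density on a grid, e.g. for
# a contour plot; the grid extents and parameters are arbitrary.
#
#     X, Y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
#     Z = bivariate_normal(X, Y, sigmax=1.0, sigmay=2.0, sigmaxy=0.5)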
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
data = np.zeros((M,N))*0.
for i in range(int(M*N*frac)):
x = np.random.randint(0,M-1)
y = np.random.randint(0,N-1)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
c1 = np.dot(w,v);
if ( c1 <= 0 ):
return dist(p, s0);
c2 = np.dot(v,v)
if ( c2 <= c1 ):
return dist(p, s1);
b = c1 / c2
pb = s0 + b * v;
return dist(p, pb)
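# Example usage (illustrative sketch): the distance from the point (1, 1) to
# the segment from (0, 0) to (2, 0) is 1.0.
#
#     d = dist_point_to_segment((1.0, 1.0), (0.0, 0.0), (2.0, 0.0))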
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
def liaupunov(x, fprime):
"""
*x* is a very long trajectory from a map, and *fprime* returns the
derivative of *x*.
Returns :
.. math::
        \lambda = \\frac{1}{n}\\sum \\ln|f'(x_i)|
.. seealso::
Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos".
`Wikipedia article on Lyapunov Exponent
<http://en.wikipedia.org/wiki/Lyapunov_exponent>`_.
.. note::
What the function here calculates may not be what you really want;
*caveat emptor*.
It also seems that this function's name is badly misspelled.
"""
return np.mean(np.log(np.absolute(fprime(x))))
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (eg
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xys = ((x,y),)
self.dataLim.update(xys, -1) #-1 means use the default ignore setting
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in self.callbackd.items():
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
Update the *datalim* in the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_numerix(x, y, True)
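# Example usage (illustrative sketch): keep the most recent 100 points of an
# incoming stream and pull them out as arrays; the stream is simulated here.
#
#     fifo = FIFOBuffer(100)
#     for i in range(250):
#         fifo.add(float(i), float(i)**2)
#     xs, ys = fifo.asarrays()    # the last 100 points, oldest first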
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
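# Example usage (illustrative sketch): 5-point moving average of an arbitrary
# series; the result has length len(x) - 4.
#
#     x = np.random.randn(100).cumsum()
#     smoothed = movavg(x, 5)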
def save(fname, X, fmt='%.18e',delimiter=' '):
"""
Save the data in *X* to file *fname* using *fmt* string to convert the
data to strings.
*fname* can be a filename or a file handle. If the filename ends
in '.gz', the file is automatically saved in compressed gzip
format. The :func:`load` function understands gzipped files
transparently.
Example usage::
save('test.out', X) # X is an array
save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays
save('test2.out', x) # x is 1D
save('test3.out', x, fmt='%1.4e') # use exponential notation
*delimiter* is used to separate the fields, eg. *delimiter* ','
for comma-separated values.
"""
if cbook.is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname,'wb')
else:
fh = file(fname,'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
origShape = None
if X.ndim == 1:
origShape = X.shape
X.shape = len(X), 1
for row in X:
fh.write(delimiter.join([fmt%val for val in row]) + '\n')
if origShape is not None:
X.shape = origShape
def load(fname,comments='#',delimiter=None, converters=None,skiprows=0,
usecols=None, unpack=False, dtype=np.float_):
"""
Load ASCII data from *fname* into an array and return the array.
    The data must be regular, same number of values in every row.
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'.
matfile data is not supported; for that, use :mod:`scipy.io.mio`
module.
Example usage::
X = load('test.dat') # data in two columns
t = X[:,0]
y = X[:,1]
Alternatively, you can do the same with "unpack"; see below::
X = load('test.dat') # a matrix of data
x = load('test.dat') # a single column of data
- *comments*: the character used to indicate the start of a comment
in the file
    - *delimiter* is a string-like character used to separate values
in the file. If *delimiter* is unspecified or *None*, any
whitespace string is a separator.
- *converters*, if not *None*, is a dictionary mapping column number to
a function that will convert that column to a float (or the optional
*dtype* if specified). Eg, if column 0 is a date string::
converters = {0:datestr2num}
- *skiprows* is the number of rows from the top to skip.
- *usecols*, if not *None*, is a sequence of integer column indexes to
extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
just the 2nd, 5th and 6th columns
- *unpack*, if *True*, will transpose the matrix allowing you to unpack
into named arguments on the left hand side::
t,y = load('test.dat', unpack=True) # for two column data
x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)
- *dtype*: the array will have this dtype. default: ``numpy.float_``
.. seealso::
See :file:`examples/pylab_examples/load_converter.py` in the source tree:
Exercises many of these options.
"""
if converters is None: converters = {}
fh = cbook.to_filehandle(fname)
X = []
if delimiter==' ':
# space splitting is a special case since x.split() is what
# you want, not x.split(' ')
def splitfunc(x):
return x.split()
else:
def splitfunc(x):
return x.split(delimiter)
converterseq = None
for i,line in enumerate(fh):
if i<skiprows: continue
line = line.split(comments, 1)[0].strip()
if not len(line): continue
if converterseq is None:
converterseq = [converters.get(j,float)
for j,val in enumerate(splitfunc(line))]
if usecols is not None:
vals = splitfunc(line)
row = [converterseq[j](vals[j]) for j in usecols]
else:
row = [converterseq[j](val)
for j,val in enumerate(splitfunc(line))]
thisLen = len(row)
X.append(row)
X = np.array(X, dtype)
r,c = X.shape
if r==1 or c==1:
X.shape = max(r,c),
if unpack: return X.transpose()
else: return X
def slopes(x,y):
"""
    SLOPES calculates the slope y'(x). Given data vectors X and Y, SLOPES
    calculates Y'(X), i.e. the slope of a curve Y(X). The slope is
estimated using the slope obtained from that of a parabola through
any three consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between x-
and y-values. For many functions, however, the abscissa are given
in different dimensions, so an aspect ratio is completely
arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
    cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
    (inspired by an original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
STINEMAN_INTERP Well behaved data interpolation. Given data
vectors X and Y, the slope vector YP and a new abscissa vector XI
the function stineman_interp(xi,x,y,yp) uses Stineman
interpolation to calculate a vector YI corresponding to XI.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa:
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were
not an academic journal but once in a while something serious
and original comes in adding that this was
"apparently a real solution" to a well known problem.
For yp=None, the routine automatically determines the slopes using
the "slopes" routine.
    X is assumed to be sorted in increasing order.
    For values xi[j] < x[0] or xi[j] > x[-1], the routine tries an
    extrapolation. The relevance of the data obtained from this is, of
    course, questionable...
original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
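# Example usage (illustrative sketch): interpolate a coarse sine curve onto a
# finer abscissa, as in the docstring; slopes are computed automatically.
#
#     x = np.linspace(0, 2*np.pi, 20)
#     y = np.sin(x)
#     xi = np.linspace(0, 2*np.pi, 40)
#     yi = stineman_interp(xi, x, y)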
def inside_poly(points, verts):
"""
points is a sequence of x,y points
    verts is a sequence of x,y vertices of a polygon
return value is a sequence of indices into points for the points
that are inside the polygon
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
def poly_below(ymin, xs, ys):
"""
    given arrays *xs* and *ys*, return the vertices of a polygon
that has a scalar lower bound *ymin* and an upper bound at the *ys*.
intended for use with Axes.fill, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
    return poly_between(xs, ymin, ys)
def poly_between(x, ylower, yupper):
"""
given a sequence of x, ylower and yupper, return the polygon that
fills the regions between them. ylower or yupper can be scalar or
    iterable. If they are iterable, they must be equal in length to x.
    The return value is x, y arrays for use with Axes.fill.
"""
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*np.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*np.ones(Nx)
x = np.concatenate( (x, x[::-1]) )
y = np.concatenate( (yupper, ylower[::-1]) )
return x,y
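# Example usage (illustrative sketch): vertices that fill the band between two
# curves, suitable for Axes.fill; ax is assumed to be an existing Axes.
#
#     x = np.linspace(0, 2*np.pi, 100)
#     xs, ys = poly_between(x, np.sin(x) - 0.2, np.sin(x) + 0.2)
#     # ax.fill(xs, ys)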
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(map(fn,*args))
#from numpy import zeros_like
def zeros_like(a):
"""
Return an array of zeros of the shape and typecode of *a*.
"""
warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning)
return np.zeros_like(a)
#from numpy import sum as sum_flat
def sum_flat(a):
"""
Return the sum of all the elements of *a*, flattened out.
It uses ``a.flat``, and if *a* is not contiguous, a call to
``ravel(a)`` is made.
"""
warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning)
return np.sum(a)
#from numpy import mean as mean_flat
def mean_flat(a):
"""
Return the mean of all the elements of *a*, flattened out.
"""
warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning)
return np.mean(a)
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
setting the keyword *closed* = 0, in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
      array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
if xfin == None:
xfin = xini + 0.0
xini = 0.0
if delta == None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
"""
Return square diagonal matrix whose non-zero elements are given by the
input array.
"""
warnings.warn("Use numpy.diag(d)", DeprecationWarning)
return np.diag(diag)
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
warnings.warn("Use dtype kwarg instead of typecode",
DeprecationWarning)
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars [0] + chars [int (number)]
max_exponent = int (math.log (number)/math.log (base))
max_power = long (base) ** max_exponent
lead_digit = int (number/max_power)
return chars [lead_digit] + \
base_repr (number - max_power * lead_digit, base, \
max (padding - 1, max_exponent))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
shifts = map (operator.rshift, max_length * [number], \
range (max_length - 1, -1, -1))
digits = map (operator.mod, shifts, max_length * [2])
if not digits.count (1): return 0
digits = digits [digits.index (1):]
return ''.join (map (repr, digits)).replace('L','')
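# Example usage (illustrative sketch):
#
#     base_repr(255, 16)    # -> 'FF'
#     binary_repr(10)       # -> '1010'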
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
def isvector(X):
"""
Like the Matlab (TM) function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
    meaning it has one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
#from numpy import fromfunction as fromfunction_kw
def fromfunction_kw(function, dimensions, **kwargs):
"""
Drop-in replacement for :func:`numpy.fromfunction`.
Allows passing keyword arguments to the desired function.
Call it as (keywords are optional)::
fromfunction_kw(MyFunction, dimensions, keywords)
The function ``MyFunction`` is responsible for handling the
dictionary of keywords it will receive.
"""
warnings.warn("Use numpy.fromfunction()", DeprecationWarning)
return np.fromfunction(function, dimensions, **kwargs)
### end fperez numutils code
def rem(x,y):
"""
Deprecated - see :func:`numpy.remainder`
"""
raise NotImplementedError('Deprecated - see numpy.remainder')
def norm(x,y=2):
"""
Deprecated - see :func:`numpy.linalg.norm`
"""
raise NotImplementedError('Deprecated - see numpy.linalg.norm')
def orth(A):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def rank(x):
"""
Deprecated - see :func:`numpy.rank`
"""
raise NotImplementedError('Deprecated - see numpy.rank')
def sqrtm(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm')
def mfuncC(f, x):
"""
Deprecated
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
def approx_real(x):
"""
Deprecated - needs clean room implementation
"""
raise NotImplementedError('Deprecated - needs clean room implementation')
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_view(rec):
"""
Return a view of an ndarray as a recarray
.. seealso::
http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html
"""
return rec.view(np.recarray)
#return rec.view(dtype=(np.record, rec.dtype), type=np.recarray)
def rec_append_field(rec, name, arr, dtype=None):
"""
Return a new record array with field name populated with data from
array *arr*. This function is Deprecated. Please use
:func:`rec_append_fields`.
"""
warnings.warn("use rec_append_fields", DeprecationWarning)
return rec_append_fields(rec, name, arr, dtype)
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError, "number of arrays do not match number of names"
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = map(np.asarray, arrs)
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError, "dtypes must be None, a single dtype or a list"
newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
newrec = np.empty(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return rec_view(newrec)
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
Nr = len(rec)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.empty(Nr, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return rec_view(newrec)
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. eg ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = rowd.keys()
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = zip(*stats)
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
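# Example usage (illustrative sketch): group a record array by 'productcode'
# and summarize the 'sales' column; the records below are made up.
#
#     r = np.rec.fromrecords([('A', 10.0), ('A', 12.0), ('B', 7.0)],
#                            names='productcode,sales')
#     stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
#     summary = rec_groupby(r, ('productcode',), stats)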
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
        dt2 = r2.dtype[name]
        assert dt2.type == dt1.type
        if dt1.itemsize > dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
newrec_fields = newrec.dtype.fields.keys()
for k, v in defaults.items():
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return rec_view(newrec)
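# Example usage (illustrative sketch): inner join of two record arrays on the
# 'id' field; the records below are made up.
#
#     r1 = np.rec.fromrecords([(1, 2.0), (2, 3.0)], names='id,x')
#     r2 = np.rec.fromrecords([(1, 'a'), (3, 'b')], names='id,label')
#     joined = rec_join('id', r1, r2)    # only id == 1 appears in both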
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=True):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
    - *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g. '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
parsedate = dateutil.parser.parse
fh = cbook.to_filehandle(fname)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def next(self):
return self.fix(self.fh.next())
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
skipheader = reader.next()
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row): continue
if row[0].startswith(comments): continue
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
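# Example usage (illustrative sketch): load a comma-delimited file with a
# header row into a record array; 'data.csv' is a hypothetical path.
#
#     r = csv2rec('data.csv')
#     r2 = csv2rec('data.csv', names=['date', 'price'], skiprows=1)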
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(map(len,map(str,column))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
.. seealso::
:func:`csv2rec`:
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
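# Illustrative sketch (not part of the original module): one way to exercise
# rec2csv above.  The helper name and the output file name are hypothetical.
def _rec2csv_example_sketch(fname='rec2csv_example.csv'):
    # build a tiny record array with an int, a float and a string field
    r = np.rec.fromrecords([(1, 2.5, 'spam'), (2, 3.75, 'eggs')],
                           names='id,price,label')
    # the dtype names become the header row of the comma-delimited file
    rec2csv(r, fname, delimiter=',')
    return fname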
def griddata(x,y,z,xi,yi):
"""
``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* =
*f*(*x*, *y*) to the data in the (usually) nonuniformly spaced
vectors (*x*, *y*, *z*). :func:`griddata` interpolates this
surface at the points specified by (*xi*, *yi*) to produce
*zi*. *xi* and *yi* must describe a regular grid, can be either 1D
or 2D, but must be monotonically increasing.
A masked array is returned if any grid points are outside convex
hull defined by input data (no extrapolation is done).
Uses natural neighbor interpolation based on Delaunay
triangulation. By default, this algorithm is provided by the
:mod:`matplotlib.delaunay` package, written by Robert Kern. The
triangulation algorithm in this package is known to fail on some
nearly pathological cases. For this reason, a separate toolkit
(:mod:`mpl_toolkits.natgrid`) has been created that provides a more
robust algorithm for triangulation and interpolation. This
toolkit is based on the NCAR natgrid library, which contains code
that is not redistributable under a BSD-compatible license. When
installed, this function will use the :mod:`mpl_toolkits.natgrid`
algorithm, otherwise it will use the built-in
:mod:`matplotlib.delaunay` package.
The natgrid matplotlib toolkit can be downloaded from
http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
"""
try:
from mpl_toolkits.natgrid import _natgrid, __version__
_use_natgrid = True
except ImportError:
import matplotlib.delaunay as delaunay
from matplotlib.delaunay import __version__
_use_natgrid = False
if not griddata._reported:
if _use_natgrid:
verbose.report('using natgrid version %s' % __version__)
else:
verbose.report('using delaunay version %s' % __version__)
griddata._reported = True
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if not len(x)==len(y)==len(z):
raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
if hasattr(z,'mask'):
x = x.compress(z.mask == False)
y = y.compress(z.mask == False)
z = z.compressed()
if _use_natgrid: # use natgrid toolkit if available.
if xi.ndim == 2:
xi = xi[0,:]
yi = yi[:,0]
# override default natgrid internal parameters.
_natgrid.seti('ext',0)
_natgrid.setr('nul',np.nan)
# cast input arrays to doubles (this makes a copy)
x = x.astype(np.float)
y = y.astype(np.float)
z = z.astype(np.float)
xo = xi.astype(np.float)
yo = yi.astype(np.float)
if min(xo[1:]-xo[0:-1]) < 0 or min(yo[1:]-yo[0:-1]) < 0:
raise ValueError('output grid defined by xi,yi must be monotone increasing')
# allocate array for output (buffer will be overwritten by nagridd)
zo = np.empty((yo.shape[0],xo.shape[0]), np.float)
_natgrid.natgridd(x,y,z,xo,yo,zo)
else: # use Robert Kern's delaunay package from scikits (default)
if xi.ndim != yi.ndim:
raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
if xi.ndim != 1 and xi.ndim != 2:
raise TypeError("inputs xi and yi must be 1D or 2D.")
if xi.ndim == 1:
xi,yi = np.meshgrid(xi,yi)
# triangulate data
tri = delaunay.Triangulation(x,y)
# interpolate data
interp = tri.nn_interpolator(z)
zo = interp(xi,yi)
# mask points on grid outside convex hull of input data.
if np.any(np.isnan(zo)):
zo = np.ma.masked_where(np.isnan(zo),zo)
return zo
griddata._reported = False
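# Illustrative sketch (assumes either mpl_toolkits.natgrid or the bundled
# matplotlib.delaunay package is importable when called): interpolate
# scattered samples of z = x*exp(-x**2 - y**2) onto a regular grid.
def _griddata_example_sketch(npts=200):
    np.random.seed(0)
    x = np.random.uniform(-2.0, 2.0, npts)
    y = np.random.uniform(-2.0, 2.0, npts)
    z = x * np.exp(-x**2 - y**2)
    # xi and yi must be monotonically increasing; 1D vectors are enough
    xi = np.linspace(-2.0, 2.0, 50)
    yi = np.linspace(-2.0, 2.0, 50)
    zi = griddata(x, y, z, xi, yi)   # masked outside the convex hull
    return xi, yi, zi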
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
:func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
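# Illustrative sketch: interpolate y = x**2 sampled at the integers 0..4 onto
# a few arbitrary abscissae.  The helper name is hypothetical.
def _less_simple_interp_example():
    x = np.arange(5.0)
    y = x**2
    xi = [0.5, 2.25, 3.0]
    # linear between samples: approximately [0.5, 5.25, 9.0]
    return less_simple_linear_interpolation(x, y, xi)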
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated from that of a parabola through any three
consecutive points.
This method should be superior to that described in the appendix
of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by a original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
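# Illustrative sketch: because the estimate comes from a parabola through
# three consecutive points, it is exact for y = x**2, giving yp = 2*x at
# every point (end points included).
def _slopes_example():
    x = np.linspace(0.0, 4.0, 9)
    y = x**2
    return slopes(x, y)   # equals 2*x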
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were "not an academic journal but once in a while something
serious and original comes in", adding that this was "apparently a
real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
N=len(y)
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
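# Illustrative numpy-only version of the docstring example above (no pylab
# names assumed): refine a coarse sine curve onto a finer abscissa.
def _stineman_interp_example():
    x = np.linspace(0, 2 * np.pi, 20)
    y = np.sin(x)
    yp = np.cos(x)                      # analytic slopes
    xi = np.linspace(0, 2 * np.pi, 40)
    return stineman_interp(xi, x, y, yp)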
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
res, = np.nonzero(nxutils.points_inside_poly(points, verts))
return res
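# Illustrative sketch: indices of the points that fall inside the unit square.
def _inside_poly_example():
    verts = [(0, 0), (1, 0), (1, 1), (0, 1)]
    points = [(0.5, 0.5), (2.0, 2.0), (0.25, 0.75)]
    # returns array([0, 2]); the point (2, 2) lies outside
    return inside_poly(points, verts)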
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
nx = ma
else:
nx = np
xs = nx.asarray(xs)
ys = nx.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*nx.ones(2*Nx)
y = nx.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
nx = ma
else:
nx = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*nx.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*nx.ones(Nx)
x = nx.concatenate( (x, x[::-1]) )
y = nx.concatenate( (yupper, ylower[::-1]) )
return x,y
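# Illustrative sketch: vertices of the band of width 0.4 around sin(x), ready
# to be passed to Axes.fill (the ax below is a hypothetical Axes instance).
def _poly_between_example():
    x = np.linspace(0, 2 * np.pi, 50)
    xs, ys = poly_between(x, np.sin(x) - 0.2, np.sin(x) + 0.2)
    # ax.fill(xs, ys) would shade the band
    return xs, ys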
def is_closed_polygon(X):
"""
Tests whether the first and last objects in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
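# Illustrative sketch: the (start, stop) index pairs of the True runs.
def _contiguous_regions_example():
    mask = [False, True, True, False, True, False, True, True, True]
    # returns [(1, 3), (4, 5), (6, 9)]
    return contiguous_regions(mask)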
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
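# Illustrative sketch: Euclidean length of each row vector of a 2D array.
def _vector_lengths_example():
    X = np.array([[3.0, 4.0], [5.0, 12.0]])
    # row-wise 2-norms: array([ 5., 13.])
    return vector_lengths(X, P=2., axis=1)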
def distances_along_curve( X ):
"""
Computes the distance between successive points in *N* dimensions,
where *X* is an *M* x *N* array or matrix. The distances between
successive rows are computed. Distance is the standard Euclidean
distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the distance travelled along a polygonal curve in *N*
dimensions, where *X* is an *M* x *N* array or matrix. Returns an array of
length *M* consisting of the distance along the curve at each point
(i.e., the rows of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
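# Illustrative sketch: cumulative distance along three sides of a unit square.
def _path_length_example():
    X = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    # returns array([ 0., 1., 2., 3.])
    return path_length(X)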
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
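# Illustrative sketch: the quadratic Bezier with control points (0, 0),
# (1, 2), (2, 0) keeps its end points and gains two interior cubic controls.
def _quad2cubic_example():
    # returns (0.0, 0.0, 2/3., 4/3., 4/3., 4/3., 2.0, 0.0)
    return quad2cubic(0.0, 0.0, 1.0, 2.0, 2.0, 0.0)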
| agpl-3.0 |
celiafish/VisTrails | vistrails/tests/runtestsuite.py | 2 | 20120 | #!/usr/bin/env python
# pragma: no testimport
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Runs all tests available in VisTrails modules by importing all of
them, stealing the classes that look like unit tests, and running
all of them.
runtestsuite.py also reports all VisTrails modules that don't export
any unit tests, as a crude measure of code coverage.
"""
import atexit
from distutils.version import LooseVersion
#import doctest
import locale
import os
import sys
import traceback
from optparse import OptionParser
import platform
import re
import shutil
import tempfile
if 'vistrails' not in sys.modules:
# Makes sure we can import modules as if we were running VisTrails
# from the root directory
_this_dir = os.path.dirname(os.path.realpath(__file__))
_root_directory = os.path.realpath(os.path.join(_this_dir, '..'))
sys.path.insert(0, os.path.realpath(os.path.join(_root_directory, '..')))
# Use a different temporary directory
test_temp_dir = tempfile.mkdtemp(prefix='vt_testsuite_')
tempfile.tempdir = test_temp_dir
@apply
class clean_tempdir(object):
def __init__(self):
atexit.register(self.clean)
self.listdir = os.listdir
self.isdir = os.path.isdir
self.test_temp_dir = test_temp_dir
self.rmtree = shutil.rmtree
self.out = sys.stdout.write
def clean(self):
nb_dirs = 0
nb_files = 0
for f in self.listdir(self.test_temp_dir):
if self.isdir(f):
nb_dirs += 1
else:
nb_files += 1
if nb_dirs > 0 or nb_files > 0:
self.out("Warning: %d dirs and %d files were left behind in "
"tempdir, cleaning up\n" % (nb_dirs, nb_files))
self.rmtree(self.test_temp_dir, ignore_errors=True)
# Parse the command-line
usage = "Usage: %prog [options] [module1 module2 ...]"
parser = OptionParser(usage=usage)
parser.add_option("-V", "--verbose", action="store", type="int",
default=0, dest="verbose",
help="set verboseness level(0--2, default=0, "
"higher means more verbose)")
parser.add_option("-v", "--vistrails-verbose", action="store", type="int",
default=0, dest="debugLevel",
help="set the debugLevel in VisTrails (0--2, default=0)")
parser.add_option("-e", "--examples", action="store_true",
default=False,
help="run vistrails examples")
parser.add_option("-i", "--images", action="store_true",
default=False,
help="perform image comparisons")
parser.add_option("--installbundles", action='store_true',
default=False,
help=("Attempt to install missing Python packages "
"automatically"))
parser.add_option("-S", "--startup", action="store", type="str", default=None,
dest="dotVistrails",
help="Set startup file (default is temporary directory)")
parser.add_option('-L', '--locale', action='store', type='str', default='',
dest='locale',
help="set locale to this string")
parser.add_option('-D', '--debug', action='store_true',
default=False,
help="start interactive debugger on unexpected error")
parser.add_option('--no-unbuffered', action='store_false', dest='unbuffered',
default=True,
help="Don't make output stream unbuffered")
(options, test_modules) = parser.parse_args()
# remove empty strings
test_modules = filter(len, test_modules)
verbose = options.verbose
locale.setlocale(locale.LC_ALL, options.locale or '')
test_examples = options.examples
test_images = options.images
installbundles = options.installbundles
dotVistrails = options.dotVistrails
debug_mode = options.debug
vistrails_verbose = options.debugLevel
# Makes stdout unbuffered, so python -u is not needed
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
if options.unbuffered:
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
# Use PyQt API v2
def setNewPyQtAPI():
try:
import sip
# We now use the new PyQt API - IPython needs it
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
except Exception:
print "Could not set PyQt API, is PyQt4 installed?"
setNewPyQtAPI()
# Log to the console
import vistrails.core.debug
vistrails.core.debug.DebugPrint.getInstance().log_to_console()
import vistrails.tests
import vistrails.core
import vistrails.core.db.io
import vistrails.core.db.locator
from vistrails.core import debug
import vistrails.gui.application
from vistrails.core.system import vistrails_root_directory, \
vistrails_examples_directory
from vistrails.core.packagemanager import get_package_manager
# VisTrails does funny stuff with unittest/unittest2, be sure to load that
# after vistrails
import unittest
root_directory = os.path.realpath(vistrails_root_directory())
###############################################################################
# Testing Examples
EXAMPLES_PATH = vistrails_examples_directory()
# Dictionary of example vistrail files to run, mapped to the workflow tags that will be skipped
VT_EXAMPLES = { 'EMBOSS_webservices.vt': ["ProphetOutput"],
'KEGGPathway.vt': [],
'KEGG_SearchEntities_webservice.vt': [],
'KEGG_webservices.vt': [],
'brain_vistrail.vt': [],
'chebi_webservice.vt': [],
'head.vt': [],
'infovis.vt': [],
'noaa_webservices.vt': [],
'offscreen.vt': [],
'plot.vt': [],
'spx.vt': [],
'structure_or_id_webservice.vt': [],
'terminator.vt': ["Isosurface Script"],
'triangle_area.vt': [],
'vtk.vt': [],
'vtk_book_3rd_p189.vt': ["quadric", "SmapleFunction",
"Almost there"],
'vtk_book_3rd_p193.vt': ["modules", "connections",
"lookup table"],
'vtk_http.vt': [],
}
###############################################################################
# Utility
def sub_print(s, overline=False):
"""Prints line with underline (and optionally overline) ASCII dashes."""
if overline:
print "-" * len(s)
print s
print "-" * len(s)
###############################################################################
if len(test_modules) > 0:
test_modules = test_modules
else:
test_modules = None
if os.path.exists(EXAMPLES_PATH):
test_images = True
def module_filter(name):
if test_modules is None:
return True
for mod in test_modules:
if name.startswith(mod):
return True
return False
###############################################################################
# reinitializing arguments and options so VisTrails does not try parsing them
sys.argv = sys.argv[:1]
# creates the app so that testing can happen
# We need the windows so we can test events, etc.
optionsDict = {
'batch': False,
'executionLog': False,
'singleInstance': False,
'installBundles': installbundles,
'enablePackagesSilently': True,
'handlerDontAsk': True,
'developerDebugger': debug_mode,
'debugLevel': vistrails_verbose
}
if dotVistrails:
optionsDict['dotVistrails'] = dotVistrails
else:
optionsDict['spawned'] = True
v = vistrails.gui.application.start_application(optionsDict)
if v != 0:
app = vistrails.gui.application.get_vistrails_application()
if app:
app.finishSession()
sys.exit(v)
# make sure that fixedCellSize is turned on
spreadsheet_conf = get_package_manager().get_package_configuration("spreadsheet")
spreadsheet_conf.fixedCellSize = True
# disable first vistrail
app = vistrails.gui.application.get_vistrails_application()
app.builderWindow.auto_view = False
app.builderWindow.close_all_vistrails(True)
print "Test Suite for VisTrails"
print "Locale settings: %s" % ', '.join('%s: %s' % (s, locale.setlocale(getattr(locale, s), None)) for s in ('LC_ALL', 'LC_TIME'))
print "Running on %s" % ', '.join(platform.uname())
print "Python is %s" % sys.version
try:
from PyQt4 import QtCore
print "Using PyQt4 %s with Qt %s" % (QtCore.PYQT_VERSION_STR, QtCore.qVersion())
except ImportError:
print "PyQt4 not available"
for pkg in ('numpy', 'scipy', 'matplotlib'):
try:
ipkg = __import__(pkg, globals(), locals(), [], -1)
print "Using %s %s" % (pkg, ipkg.__version__)
except ImportError:
print "%s not available" % pkg
try:
import vtk
print "Using vtk %s" % vtk.vtkVersion().GetVTKVersion()
except ImportError:
print "vtk not available"
print ""
tests_passed = True
main_test_suite = unittest.TestSuite()
test_loader = unittest.TestLoader()
import_skip_regex = re.compile(r'(?i)# *pragma[: ]*no *testimport')
if test_modules:
sub_print("Trying to import some of the modules")
else:
sub_print("Trying to import all modules")
for (p, subdirs, files) in os.walk(root_directory):
# skip subversion subdirectories
if p.find('.svn') != -1 or p.find('.git') != -1 :
continue
for filename in files:
# skip files that don't look like VisTrails python modules
if not filename.endswith('.py'):
continue
module_file = os.path.join(p, filename)
module = os.path.join("vistrails", p[len(root_directory)+1:],
filename[:-3])
if (module.startswith(os.sep) or
('#' in module)):
continue
# use qualified import names with periods instead of
# slashes to avoid duplicates in sys.modules
module = module.replace('/','.')
module = module.replace('\\','.')
if module.endswith('__init__'):
module = module[:-9]
if not module_filter(module):
continue
if module.startswith('vistrails.tests.resources'):
continue
if ('.system.' in module and not
module.endswith('__init__')):
continue
with open(module_file) as fp:
l = fp.readline()
if l.startswith('#!'): # shebang
l = fp.readline()
if import_skip_regex.match(l):
if verbose >= 1:
print >>sys.stderr, ("Skipping %s, not an importable "
"module" % module)
continue
m = None
try:
if '.' in module:
m = __import__(module, globals(), locals(), ['foo'])
else:
m = __import__(module)
except BaseException:
print >>sys.stderr, "ERROR: Could not import module: %s" % module
if verbose >= 1:
traceback.print_exc(file=sys.stderr)
continue
# Load the unittest TestCases
suite = test_loader.loadTestsFromModule(m)
# Load the doctests
#try:
# suite.addTests(doctest.DocTestSuite(m))
#except ValueError:
# pass # No doctest is fine, we check that some tests exist later
# The doctests are currently opt-in; a load_tests method can be
# defined to build a DocTestSuite
# This is because some modules have interpreter-formatted examples that
# are NOT doctests, and because mining the codebase for doctests is
# painfully slow
main_test_suite.addTests(suite)
if suite.countTestCases() == 0 and verbose >= 1:
print >>sys.stderr, "WARNING: module has no tests: %s" % module
elif verbose >= 2:
print >>sys.stderr, "OK: module has %d test cases: %s" % (
suite.countTestCases(),
module)
sub_print("Imported modules. Running %d tests%s..." % (
main_test_suite.countTestCases(),
", and thumbnails comparison" if test_images else ''),
overline=True)
############## TEST VISTRAIL IMAGES ####################
# Compares thumbnails with the generated images to detect broken visualizations
image_tests = [("terminator.vt", [("terminator_isosurface", "Isosurface"),
("terminator_VRSW", "Volume Rendering SW"),
("terminator_CPSW", "Clipping Plane SW"),
("terminator_CRSW", "Combined Rendering SW"),
("terminator_ISSW", "Image Slices SW")])
]
compare_use_vtk = False
try:
import vtk
if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= LooseVersion('5.8.0'):
compare_use_vtk = True
except ImportError:
pass
if compare_use_vtk:
def compare_thumbnails(prev, next):
#vtkImageDifference assumes RGB, so strip alpha
def removeAlpha(file):
freader = vtk.vtkPNGReader()
freader.SetFileName(file)
removealpha = vtk.vtkImageExtractComponents()
removealpha.SetComponents(0,1,2)
removealpha.SetInputConnection(freader.GetOutputPort())
removealpha.Update()
return removealpha.GetOutput()
#do the image comparison
a = removeAlpha(prev)
b = removeAlpha(next)
idiff = vtk.vtkImageDifference()
idiff.SetInput(a)
idiff.SetImage(b)
idiff.Update()
return idiff.GetThresholdedError()
else:
try:
from scipy.misc import imread
except ImportError:
imread = None
if test_images:
print "Warning: VTK missing or older than 5.8.0, NOT comparing thumbnails"
if imread is not None:
def compare_thumbnails(prev, next):
prev_img = imread(prev)
next_img = imread(next)
assert len(prev_img.shape) == 3
assert len(next_img.shape) == 3
if prev_img.shape[:2] == next_img.shape[:2]:
return 0
else:
return float('Inf')
else:
def compare_thumbnails(prev, next):
if os.path.isfile(prev) and os.path.isfile(next):
return 0
else:
return float('Inf')
def image_test_generator(vtfile, version):
from vistrails.core.db.locator import FileLocator
from vistrails.core.db.io import load_vistrail
import vistrails.core.console_mode
def test(self):
try:
errs = []
filename = os.path.join(EXAMPLES_PATH, vtfile)
locator = FileLocator(os.path.abspath(filename))
(v, abstractions, thumbnails, mashups) = load_vistrail(locator)
errs = vistrails.core.console_mode.run(
[(locator, version)],
update_vistrail=False,
extra_info={'compare_thumbnails': compare_thumbnails})
if len(errs) > 0:
for err in errs:
print(" *** Error in %s:%s:%s -- %s" % err)
self.fail(str(err))
except Exception, e:
self.fail(debug.format_exception(e))
return test
class TestVistrailImages(unittest.TestCase):
pass
if test_images:
for vt, t in image_tests:
for name, version in t:
test_name = 'test_%s' % name
test = image_test_generator(vt, version)
setattr(TestVistrailImages, test_name, test)
main_test_suite.addTest(TestVistrailImages(test_name))
############## RUN TEST SUITE ####################
class TestResult(unittest.TextTestResult):
def addSkip(self, test, reason):
self.stream.writeln("skipped '{0}': {1}".format(str(test), reason))
super(TestResult, self).addSkip(test, reason)
runner = unittest.TextTestRunner(
verbosity=max(verbose, 1),
resultclass=TestResult)
result = runner.run(main_test_suite)
if not result.wasSuccessful():
tests_passed = False
sub_print("Tests finished.", overline=True)
if test_examples:
import vistrails.core.console_mode
sub_print("Testing examples:")
summary = {}
nworkflows = 0
nvtfiles = 0
for vtfile in VT_EXAMPLES.keys():
try:
errs = []
filename = os.path.join(EXAMPLES_PATH,
vtfile)
print filename
locator = vistrails.core.db.locator.FileLocator(os.path.abspath(filename))
(v, abstractions, thumbnails, mashups) = vistrails.core.db.io.load_vistrail(locator)
w_list = []
for version,tag in v.get_tagMap().iteritems():
if tag not in VT_EXAMPLES[vtfile]:
w_list.append((locator,version))
nworkflows += 1
if len(w_list) > 0:
errs = vistrails.core.console_mode.run(w_list, update_vistrail=False)
summary[vtfile] = errs
except Exception, e:
errs.append((vtfile,"None", "None", debug.format_exception(e)))
summary[vtfile] = errs
nvtfiles += 1
print "-" * 79
print "Summary of Examples: %s workflows in %s vistrail files" % (
nworkflows, nvtfiles)
print ""
errors = False
for vtfile, errs in summary.iteritems():
print vtfile
if len(errs) > 0:
for err in errs:
print(" *** Error in %s:%s:%s -- %s" % err)
errors = True
else:
print " Ok."
print "-" * 79
if errors:
tests_passed = False
sub_print("There were errors. See summary for more information")
else:
sub_print("Examples ran successfully.")
vistrails.gui.application.get_vistrails_application().finishSession()
vistrails.gui.application.stop_application()
# Test Runners can use the return value to know if the tests passed
sys.exit(0 if tests_passed else 1)
| bsd-3-clause |
zaxliu/scipy | scipy/stats/tests/test_morestats.py | 19 | 50547 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
4.43,0.21,4.75,0.71,1.52,3.24,
0.93,0.42,4.97,9.53,4.55,0.47,6.66]
w,pw = stats.shapiro(x1)
assert_almost_equal(w,0.90047299861907959,6)
assert_almost_equal(pw,0.042089745402336121,6)
x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
0.08,3.67,2.81,3.49]
w,pw = stats.shapiro(x2)
assert_almost_equal(w,0.9590270,6)
assert_almost_equal(pw,0.52460,3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A,crit,sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1,'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A,crit,sig = stats.anderson(x2,'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1,2,3,3,4]
y = [3,2,6,1,6,1,4,1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x,y)
assert_almost_equal(W,23.5,11)
assert_almost_equal(pval,0.13499256881897437,11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W,185.5,11)
assert_almost_equal(pval,0.18145819972867083,11)
def test_exact(self):
W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12])
assert_almost_equal(W,10.0,11)
assert_almost_equal(pval,0.533333333333333333,7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T,20.78587342806484,7)
assert_almost_equal(pval,0.0136358632781,7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W,1.7059176930008939,7)
assert_almost_equal(pval,0.0990829755522,7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1,1,21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
def test_data(self):
pval = stats.binom_test(100,250)
assert_almost_equal(pval,0.0018833009350757682,11)
pval = stats.binom_test(201,405)
assert_almost_equal(pval,0.92085205962670713,11)
pval = stats.binom_test([682,243],p=3.0/4)
assert_almost_equal(pval,0.38249155957481695,11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1,2,3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1,x1**2),
(3.2282229927203536, 0.072379187848207877), 11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478), 11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
#Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1,2])
assert_raises(ValueError, stats.wilcoxon, [1,2], [1,2], "dummy")
class TestKstat(TestCase):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_bad_arg(self):
# Raise ValueError is n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, 0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355,5,2,359,10,350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20,21,22,18,19,20.5,19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355,5,2,359,10,350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
# This would result a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], ties="foo")
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency table,
# so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
neuropoly/spinalcordtoolbox | dev/atlas/validate_atlas/plot_map.py | 1 | 19061 | #!/usr/bin/env python
__author__ = 'Simon_2'
# ======================================================================================================================
# Extract results from .txt files generated by " " and draw plots to validate metric extraction.
# ======================================================================================================================
import os
import glob
import getopt
import sys
import numpy
import re
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.legend_handler import *
# import subprocess
path_sct = os.environ.get("SCT_DIR", os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
sys.path.append(os.path.join(path_sct, "scripts"))
import sct_utils as sct
class Param:
def __init__(self):
self.debug = 0
self.results_folder = 'results_20150210_200iter/map'
self.methods_to_display = 'map'
self.fname_folder_to_save_fig = './result_plots' #/Users/slevy_local/Dropbox/article_wm_atlas/fig/to_include_in_article'
def color_legend_texts(leg):
"""Color legend texts based on color of corresponding lines"""
for line, txt in zip(leg.get_lines(), leg.get_texts()):
txt.set_color(line.get_color())
line.set_color('white')
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
results_folder = param_default.results_folder
methods_to_display = param_default.methods_to_display
# Parameters for debug mode
if param_default.debug:
print '\n*** WARNING: DEBUG MODE ON ***\n'
results_folder = "/Users/slevy_local/spinalcordtoolbox/dev/atlas/validate_atlas/results_20150210_200iter/map" #"C:/cygwin64/home/Simon_2/data_map"
methods_to_display = 'map'
else:
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:m:') # define flags
except getopt.GetoptError as err: # check if the arguments are defined
print str(err) # error
# usage() # display usage
# if not opts:
# print 'Please enter the path to the result folder. Exit program.'
# sys.exit(1)
# # usage()
for opt, arg in opts: # explore flags
if opt in '-i':
results_folder = arg
if opt in '-m':
methods_to_display = arg
sct.printv("Working directory: "+os.getcwd())
sct.printv('\n\nData will be extracted from folder '+results_folder+' .', 'warning')
sct.printv('\t\tCheck existence...')
sct.check_folder_exist(results_folder)
# Extract methods to display
methods_to_display = methods_to_display.strip().split(',')
fname_results = glob.glob(results_folder + '/*.txt')
nb_results_file = len(fname_results)
# 1st dim: SNR, 2nd dim: tract std, 3rd dim: mean abs error, 4th dim: std abs error
# result_array = numpy.empty((nb_results_file, nb_results_file, 3), dtype=object)
# SNR
snr = numpy.zeros((nb_results_file))
# Tracts std
tracts_std = numpy.zeros((nb_results_file))
# methods' name
methods_name = [] #numpy.empty((nb_results_file, nb_method), dtype=object)
# labels
error_per_label = []
std_per_label = []
labels_id = []
# median
median_results = numpy.zeros((nb_results_file, 6))
# median std across bootstraps
median_std = numpy.zeros((nb_results_file, 6))
# min
min_results = numpy.zeros((nb_results_file, 6))
# max
max_results = numpy.zeros((nb_results_file, 6))
# Extract variance within labels and variance of noise
map_var_params = numpy.zeros((nb_results_file, 2))
for i_file in range(0, nb_results_file):
fname = fname_results[i_file].strip()
ind_start, ind_end = fname.index('results_map')+11, fname.index('_all.txt')
var = fname[ind_start:ind_end]
map_var_params[i_file, 0] = float(var.split(",")[0])
map_var_params[i_file, 1] = float(var.split(",")[1])
# Read each file and extract data
for i_file in range(0, nb_results_file):
# Open file
f = open(fname_results[i_file]) # open file
# Extract all lines in .txt file
lines = [line for line in f.readlines() if line.strip()]
# extract SNR
# find all index of lines containing the string "sigma noise"
ind_line_noise = [lines.index(line_noise) for line_noise in lines if "sigma noise" in line_noise]
if len(ind_line_noise) != 1:
sct.printv("ERROR: number of lines including \"sigma noise\" is different from 1. Exit program.", 'error')
sys.exit(1)
else:
# result_array[:, i_file, i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
snr[i_file] = int(''.join(c for c in lines[ind_line_noise[0]] if c.isdigit()))
# extract tract std
ind_line_tract_std = [lines.index(line_tract_std) for line_tract_std in lines if "range tracts" in line_tract_std]
if len(ind_line_tract_std) != 1:
sct.printv("ERROR: number of lines including \"range tracts\" is different from 1. Exit program.", 'error')
sys.exit(1)
else:
# result_array[i_file, i_file, :] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
# regex = re.compile(''('(.*)':) # re.I makes the match ignore case (upper/lower)
# match = regex.search(lines[ind_line_tract_std[0]])
# result_array[:, i_file, :, :] = match.group(1) # group 1 corresponds to '.*'
tracts_std[i_file] = int(''.join(c for c in lines[ind_line_tract_std[0]].split(':')[1] if c.isdigit()))
# extract method name
ind_line_label = [lines.index(line_label) for line_label in lines if "Label" in line_label]
if len(ind_line_label) != 1:
sct.printv("ERROR: number of lines including \"Label\" is different from 1. Exit program.", 'error')
sys.exit(1)
else:
# methods_name[i_file, :] = numpy.array(lines[ind_line_label[0]].strip().split(',')[1:])
methods_name.append(lines[ind_line_label[0]].strip().replace(' ', '').split(',')[1:])
# extract median
ind_line_median = [lines.index(line_median) for line_median in lines if "median" in line_median]
if len(ind_line_median) != 1:
sct.printv("WARNING: number of lines including \"median\" is different from 1. Exit program.", 'warning')
# sys.exit(1)
else:
median = lines[ind_line_median[0]].strip().split(',')[1:]
# result_array[i_file, i_file, 0] = [float(m.split('(')[0]) for m in median]
median_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in median])
median_std[i_file, :] = numpy.array([float(m.split('(')[1][:-1]) for m in median])
# extract min
ind_line_min = [lines.index(line_min) for line_min in lines if "min," in line_min]
if len(ind_line_min) != 1:
sct.printv("WARNING: number of lines including \"min\" is different from 1. Exit program.", 'warning')
# sys.exit(1)
else:
min = lines[ind_line_min[0]].strip().split(',')[1:]
# result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in min]
min_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in min])
# extract max
ind_line_max = [lines.index(line_max) for line_max in lines if "max" in line_max]
if len(ind_line_max) != 1:
sct.printv("WARNING: number of lines including \"max\" is different from 1. Exit program.", 'warning')
# sys.exit(1)
else:
max = lines[ind_line_max[0]].strip().split(',')[1:]
# result_array[i_file, i_file, 1] = [float(m.split('(')[0]) for m in max]
max_results[i_file, :] = numpy.array([float(m.split('(')[0]) for m in max])
# extract error for each label
error_per_label_for_file_i = []
std_per_label_for_file_i = []
labels_id_for_file_i = []
# Due to 2 different kinds of file structure, the number of the last label line must be adapted
if not ind_line_median:
ind_line_median = [len(lines)+1]
for i_line in range(ind_line_label[0]+1, ind_line_median[0]-1):
line_label_i = lines[i_line].strip().split(',')
error_per_label_for_file_i.append([float(error.strip().split('(')[0]) for error in line_label_i[1:]])
std_per_label_for_file_i.append([float(error.strip().split('(')[1][:-1]) for error in line_label_i[1:]])
labels_id_for_file_i.append(line_label_i[0])
error_per_label.append(error_per_label_for_file_i)
std_per_label.append(std_per_label_for_file_i)
labels_id.append(labels_id_for_file_i)
# close file
f.close()
# check if all the files in the result folder were generated with the same number of methods
if not all(x == methods_name[0] for x in methods_name):
sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same number of methods. Exit program.', 'error')
sys.exit(1)
# check if all the files in the result folder were generated with the same labels
if not all(x == labels_id[0] for x in labels_id):
sct.printv('ERROR: All the generated files in folder '+results_folder+' have not been generated with the same labels. Exit program.', 'error')
sys.exit(1)
# convert the list "error_per_label" into a numpy array to ease further manipulations
error_per_label = numpy.array(error_per_label)
std_per_label = numpy.array(std_per_label)
# compute different stats
abs_error_per_labels = numpy.absolute(error_per_label)
max_abs_error_per_meth = numpy.amax(abs_error_per_labels, axis=1)
min_abs_error_per_meth = numpy.amin(abs_error_per_labels, axis=1)
mean_abs_error_per_meth = numpy.mean(abs_error_per_labels, axis=1)
std_abs_error_per_meth = numpy.std(abs_error_per_labels, axis=1)
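# abs_error_per_labels has shape (nb_results_file, nb_labels, nb_methods), so reducing
# along axis=1 gives one value per file and per method, aggregated across labels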
sct.printv('Noise std of the '+str(nb_results_file)+' generated files:')
print snr
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Tracts std of the '+str(nb_results_file)+' generated files:')
print tracts_std
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Methods used to generate results for the '+str(nb_results_file)+' generated files:')
print methods_name
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Median obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in lines):')
print median_results
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Minimum obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in lines):')
print min_results
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Maximum obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in lines):')
print max_results
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Labels\' ID (in columns) for the '+str(nb_results_file)+' generated files (in lines):')
print labels_id
print '----------------------------------------------------------------------------------------------------------------'
sct.printv('Errors obtained with each method (in columns) for the '+str(nb_results_file)+' generated files (in lines):')
print error_per_label
# ********************************** START PLOTTING HERE ***********************************************************
matplotlib.rcParams.update({'font.size': 45, 'font.family': 'Trebuchet'})
plt.rcParams['xtick.major.pad'] = '9'
plt.rcParams['ytick.major.pad'] = '15'
# matplotlib.rcParams['legend.handlelength'] = 0
# find indexes of files to be plotted
ind_var_noise20 = numpy.where(map_var_params[:, 1] == 20) # indexes where noise variance = 20
ind_ind_var_label_sort_var_noise20 = numpy.argsort(map_var_params[ind_var_noise20, 0]) # indexes of indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)
ind_var_label_sort_var_noise20 = ind_var_noise20[0][ind_ind_var_label_sort_var_noise20][0] # indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)
ind_var_label20 = numpy.where(map_var_params[:, 0] == 20) # indexes where variance within labels = 20
ind_ind_var_noise_sort_var_label20 = numpy.argsort(map_var_params[ind_var_label20, 1]) # indexes of indexes where label variance=20 sorted according to values of noise variance (in ascending order)
ind_var_noise_sort_var_label20 = ind_var_label20[0][ind_ind_var_noise_sort_var_label20][0] # indexes where noise variance=20 sorted according to values of variance within labels (in ascending order)
plt.close('all')
# Errorbar plot
plt.figure()
plt.ylabel('Mean absolute error (%)', fontsize=55)
plt.xlabel('Variance within labels (in percentage of the mean)', fontsize=55)
plt.title('Sensitivity of the method \"MAP\" to the variance within labels and to the SNR\n', fontsize=65)
plt.errorbar(map_var_params[ind_var_label_sort_var_noise20, 0], mean_abs_error_per_meth[ind_var_label_sort_var_noise20, 0], std_abs_error_per_meth[ind_var_label_sort_var_noise20, 0], color='blue', marker='o', linestyle='--', markersize=8, elinewidth=2, capthick=2, capsize=10)
plt.errorbar(map_var_params[ind_var_noise_sort_var_label20, 1], mean_abs_error_per_meth[ind_var_noise_sort_var_label20, 0], std_abs_error_per_meth[ind_var_noise_sort_var_label20, 0], color='red', marker='o', linestyle='--', markersize=8, elinewidth=2, capthick=2, capsize=10)
# plt.legend(plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., handler_map={Line2D: HandlerLine2D(numpoints=1)})
plt.legend(['noise variance = 20', 'variance within labels = 20% of the mean'], loc='best', handler_map={Line2D: HandlerLine2D(numpoints=1)})
plt.gca().set_xlim([numpy.min(map_var_params[ind_var_label_sort_var_noise20, 0]) - 1, numpy.max(map_var_params[ind_var_label_sort_var_noise20, 0]) + 1])
plt.grid(b=True, axis='both')
# plt.gca().yaxis.set_major_locator(plt.MultipleLocator(2.5))
# Box-and-whisker plots
nb_box = 2
plt.figure(figsize=(30, 15))
width = 1.0 / (nb_box + 1)
ind_fig = numpy.arange(len(map_var_params[ind_var_label_sort_var_noise20, 0])) * (1.0 + width)
plt.ylabel('Absolute error (%)\n', fontsize=55)
plt.xlabel('Variance', fontsize=55)
plt.title('Sensitivity of the method \"MAP\" to the variance within labels and to the SNR\n', fontsize=65)
# colors = plt.get_cmap('jet')(np.linspace(0, 1.0, nb_box))
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
box_plots = []
boxprops = dict(linewidth=6, color='b')
flierprops = dict(markeredgewidth=0.7, markersize=15, marker='.', color='b')
whiskerprops = dict(linewidth=5, color='b')
capprops = dict(linewidth=5, color='b')
medianprops = dict(linewidth=6, color='b')
meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
meanlineprops = dict(linestyle='--', linewidth=2.5)
plot_constant_noise_var = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_var_label_sort_var_noise20, :, 0]), positions=ind_fig, widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
box_plots.append(plot_constant_noise_var['boxes'][0])
boxprops = dict(linewidth=6, color='r')
flierprops = dict(markeredgewidth=0.7, markersize=15, marker='.', color='r')
whiskerprops = dict(linewidth=5, color='r')
capprops = dict(linewidth=5, color='r')
medianprops = dict(linewidth=6, color='r')
meanpointprops = dict(marker='D', markeredgecolor='black', markerfacecolor='firebrick')
meanlineprops = dict(linestyle='--', linewidth=2.5)
plot_constant_label_var = plt.boxplot(numpy.transpose(abs_error_per_labels[ind_var_noise_sort_var_label20, :, 0]), positions=ind_fig + width + width / (nb_box + 1), widths=width, boxprops=boxprops, medianprops=medianprops, flierprops=flierprops, whiskerprops=whiskerprops, capprops=capprops)
box_plots.append(plot_constant_label_var['boxes'][0])
# add alternated vertical background colored bars
for i_xtick in range(0, len(ind_fig), 2):
plt.axvspan(ind_fig[i_xtick] - width - width / 4, ind_fig[i_xtick] + (nb_box+1) * width - width / 4, facecolor='grey', alpha=0.1)
# plt.legend(box_plots, methods_to_display, bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)
# leg = plt.legend(box_plots, [r'$\mathrm{\mathsf{noise\ variance\ =\ 20\ voxels^2}}$', r'$\mathrm{\mathsf{variance\ within\ labels\ =\ 20\%\ of\ the\ mean\ value}}$'], loc=1, handletextpad=-2)
# color_legend_texts(leg)
# convert xtick labels into int
xtick_labels = [int(xtick) for xtick in map_var_params[ind_var_label_sort_var_noise20, 0]]
plt.xticks(ind_fig + (numpy.floor(nb_box / 2)) * (width/2) * (1.0 + 1.0 / (nb_box + 1)), xtick_labels)
plt.gca().set_xlim([-width, numpy.max(ind_fig) + (nb_box + 0.5) * width])
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1.0))
plt.gca().yaxis.set_minor_locator(plt.MultipleLocator(0.25))
plt.grid(b=True, axis='y', which='both')
plt.savefig(os.path.join(param_default.fname_folder_to_save_fig, 'absolute_error_as_a_function_of_MAP_parameters.pdf'), format='PDF')
plt.show(block=False)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
param_default = Param()
# call main function
main()
| mit |
gef756/statsmodels | statsmodels/graphics/tests/test_correlation.py | 31 | 1112 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
from statsmodels.datasets import randhie
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_plot_corr():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr(corr_matrix, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, normcolor=True, title='', cmap='jet')
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_corr_grid():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr_grid([corr_matrix] * 2, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 5, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 3, normcolor=True, titles='', cmap='jet')
plt.close(fig)
| bsd-3-clause |
sintetizzatore/ThinkStats2 | code/scatter.py | 69 | 4281 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import sys
import numpy as np
import math
import brfss
import thinkplot
import thinkstats2
def GetHeightWeight(df, hjitter=0.0, wjitter=0.0):
"""Get sequences of height and weight.
df: DataFrame with htm3 and wtkg2
hjitter: float magnitude of random noise added to heights
wjitter: float magnitude of random noise added to weights
returns: tuple of sequences (heights, weights)
"""
heights = df.htm3
if hjitter:
heights = thinkstats2.Jitter(heights, hjitter)
weights = df.wtkg2
if wjitter:
weights = thinkstats2.Jitter(weights, wjitter)
return heights, weights
def ScatterPlot(heights, weights, alpha=1.0):
"""Make a scatter plot and save it.
heights: sequence of float
weights: sequence of float
alpha: float
"""
thinkplot.Scatter(heights, weights, alpha=alpha)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def HexBin(heights, weights, bins=None):
"""Make a hexbin plot and save it.
heights: sequence of float
weights: sequence of float
bins: 'log' or None for linear
"""
thinkplot.HexBin(heights, weights, bins=bins)
thinkplot.Config(xlabel='height (cm)',
ylabel='weight (kg)',
axis=[140, 210, 20, 200],
legend=False)
def MakeFigures(df):
"""Make scatterplots.
"""
sample = thinkstats2.SampleRows(df, 5000)
# simple scatter plot
thinkplot.PrePlot(cols=2)
heights, weights = GetHeightWeight(sample)
ScatterPlot(heights, weights)
# scatter plot with jitter
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(sample, hjitter=1.3, wjitter=0.5)
ScatterPlot(heights, weights)
thinkplot.Save(root='scatter1')
# with jitter and transparency
thinkplot.PrePlot(cols=2)
ScatterPlot(heights, weights, alpha=0.1)
# hexbin plot
thinkplot.SubPlot(2)
heights, weights = GetHeightWeight(df, hjitter=1.3, wjitter=0.5)
HexBin(heights, weights)
thinkplot.Save(root='scatter2')
def BinnedPercentiles(df):
"""Bin the data by height and plot percentiles of weight for eachbin.
df: DataFrame
"""
cdf = thinkstats2.Cdf(df.htm3)
print('Fraction between 140 and 200 cm', cdf[200] - cdf[140])
bins = np.arange(135, 210, 5)
indices = np.digitize(df.htm3, bins)
groups = df.groupby(indices)
heights = [group.htm3.mean() for i, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.wtkg2) for i, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
weights = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(heights, weights, label=label)
thinkplot.Save(root='scatter3',
xlabel='height (cm)',
ylabel='weight (kg)')
def Correlations(df):
print('pandas cov', df.htm3.cov(df.wtkg2))
#print('NumPy cov', np.cov(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Cov', thinkstats2.Cov(df.htm3, df.wtkg2))
print()
print('pandas corr', df.htm3.corr(df.wtkg2))
#print('NumPy corrcoef', np.corrcoef(df.htm3, df.wtkg2, ddof=0))
print('thinkstats2 Corr', thinkstats2.Corr(df.htm3, df.wtkg2))
print()
print('pandas corr spearman', df.htm3.corr(df.wtkg2, method='spearman'))
print('thinkstats2 SpearmanCorr',
thinkstats2.SpearmanCorr(df.htm3, df.wtkg2))
print('thinkstats2 SpearmanCorr log wtkg3',
thinkstats2.SpearmanCorr(df.htm3, np.log(df.wtkg2)))
print()
print('thinkstats2 Corr log wtkg3',
thinkstats2.Corr(df.htm3, np.log(df.wtkg2)))
print()
def main(script):
thinkstats2.RandomSeed(17)
df = brfss.ReadBrfss(nrows=None)
df = df.dropna(subset=['htm3', 'wtkg2'])
Correlations(df)
return
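# NOTE: the early return above skips the figure generation below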
MakeFigures(df)
BinnedPercentiles(df)
if __name__ == '__main__':
main(*sys.argv)
| gpl-3.0 |
FRC-RS/FRS | util/management/commands/plot_elo.py | 1 | 4997 | from django.core.management.base import BaseCommand
from matplotlib import pyplot as plt, mlab
from matplotlib import gridspec
from FRS.settings import SUPPORTED_YEARS
from TBAW.models import Team
import numpy as np
import numpy.random as nprand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--file', dest='f', default='', type=str,
help="Read elo data from file, requires a column for ids and a column for elo scores.")
parser.add_argument('--start', dest='start-year', default=SUPPORTED_YEARS[0], type=int, metavar="from_year",
help="Start year of range from which to select teams. They must at least participate in this year.")
parser.add_argument('--end', dest='end-year', default=SUPPORTED_YEARS[-1], type=int, metavar="to_year",
help="End year of range from which to select teams. They must have participated in one match before or during this year.")
def handle(self, *args, **options):
if options['f'] is '':
start_year = options['start-year']
end_year = options['end-year']
print("For years in range [%d, %d]" % (start_year, end_year))
_query = Team.objects.filter(event__year__range=(start_year, end_year)).exclude(elo_mu=1500.0).distinct().order_by('id')
elo_array, id_array = [list(x) for x in zip(*[(t.elo_mu, t.id) for t in _query.all()])]
else:
raise NotImplementedError("Cannot yet read any file types.")
gs = gridspec.GridSpec(2, 3, width_ratios=[1, 1, 2])
# Span topmost row
plt.subplot(gs[0, :])
num_bins = 100
elo_mean = np.mean(elo_array)
elo_var = np.var(elo_array)
elo_stddev = np.sqrt(elo_var)
elo_median = np.median(elo_array)
inner = [x for x in elo_array if elo_mean - elo_stddev<= x <= elo_mean + elo_stddev]
print("%f%% of elos fall within 1 stddev (%f, %f)" % (100 * len(inner) / len(elo_array), elo_mean - elo_stddev, elo_mean + elo_stddev))
inner = [x for x in elo_array if elo_mean - 2*elo_stddev <= x <= elo_mean + 2*elo_stddev]
print("%f%% of elos fall within 2 stddev (%f, %f)" % (100 * len(inner) / len(elo_array), elo_mean - 2*elo_stddev, elo_mean + 2*elo_stddev))
inner = [x for x in elo_array if elo_mean - 3*elo_stddev <= x <= elo_mean + 3*elo_stddev]
print("%f%% of elos fall within 3 stddev (%f, %f)" % (100 * len(inner) / len(elo_array), elo_mean - 3*elo_stddev, elo_mean + 3*elo_stddev))
# Probability Distribution and Fit Line
n, bins, patches = plt.hist(elo_array, num_bins, normed=True, facecolor='black', alpha=0.9)
y = mlab.normpdf(bins, elo_mean, elo_stddev)
plt.plot(bins, y, 'r--')
plt.title("Current ELO Distribution")
plt.xlabel("ELO")
plt.ylabel("Frequency")
# Mean and Median lines w/ Labels
median_label_fnt = {
'size': 'small',
'weight': 'roman',
'ha': 'left'
}
mean_label_fnt = median_label_fnt.copy()
mean_label_fnt['ha'] = 'right'
mean_label_delta = -30
median_label_delta = 30
if elo_median < elo_mean:
mean_label_delta *= -1
median_label_delta *= -1
median_label_fnt['ha'], mean_label_fnt['ha'] = mean_label_fnt['ha'], median_label_fnt['ha']
_, max_freq = plt.ylim()
plt.axvline(x=elo_median, color="magenta")
plt.text(elo_median+median_label_delta, max_freq/10, 'Median:\n%.3f' % elo_median, fontdict=median_label_fnt, color="magenta")
plt.axvline(x=elo_mean, color="orange")
plt.text(elo_mean+mean_label_delta, max_freq/10, 'Mean:\n%.3f' % elo_mean, fontdict=mean_label_fnt, color="orange")
# Std Deviation
plt.text(10, max_freq*.99, "Std Dev: %.3f" % elo_stddev, va='top')
plt.subplots_adjust(left=0.15, hspace=0.5, wspace=0.3)
# Plot 2nd subplot in second row, same grid
ax = plt.subplot(gs[1, 0:2])
plt.scatter(id_array, elo_array)
plt.xlabel("Team Number")
plt.ylabel("ELO")
ax.set_title("Elo by Team Number")
# Normal Probability Plot
ax = plt.subplot(gs[1, 2])
actual_data = elo_array[:]
actual_data.sort()
normal_data = nprand.normal(elo_mean, elo_stddev, size = len(elo_array))
normal_data.sort()
plt.plot(normal_data, actual_data, "o")
z = np.polyfit(normal_data, actual_data, 1)
p = np.poly1d(z)
plt.plot(normal_data, p(normal_data), "r--")
ax.set_title("Normal Probability Plot")
plt.xlabel("Theoretical ELO (If Normal)")
plt.ylabel("Actual ELO")
(xmin, xmax) = plt.xlim()
ax.margins(x=0.05)
ax.set_xticks(np.arange(xmin, xmax, 300))
ax.set_xticks(np.arange(xmin, xmax, 50), minor=True)
plt.show()
| mit |
noobermin/sharks-butts | bin/pextrect.py | 1 | 4085 | #!/usr/bin/env python2
'''
Plot a charge vs. two other components, x and y of a pext.py output.
Usage:
pextrect.py [options] <input> <x> <y> [<output>]
Options:
--x-bins=BINS -x BINS Set the number of x bins. [default: 100]
--y-bins=BINS -y BINS Set the number of y bins. [default: 100]
--x-label=LABEL LABEL Set the x label.
--y-label=LABEL LABEL Set the y label.
--xlim=LIM Set limits on the x variable.
--ylim=LIM Set limits on the y variable.
--x-no-restrict Set limits, but don't restrict x.
--y-no-restrict Set limits, but don't restrict y.
--title=TITLE -t TITLE Set the title.
--clabel=CLABEL Set colorbar label. [default: $p C$]
--high-res -H Output a high resolution plt.
--max-Q=MAXQ Set the maximum for the charge (pcolormesh's vmax value).
--normalize Normalize the histogram.
--factor=F Multiply histogram by F. [default: 1.0]
'''
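# Example invocation (file and field names here are hypothetical):
#   ./pextrect.py -t 'Charge map' --x-label='x (cm)' --y-label='y (cm)' pext.pckle x y charge.png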
import numpy as np;
import matplotlib.pyplot as plt;
import cPickle as pickle;
from docopt import docopt;
from pys import conv,test;
from lspplot.cmaps import pastel;
def restrict(x,xlim):
good = (x >= xlim[0]) & (x <= xlim[1]);
return x[good];
def pextrect(d,xname,yname,**kw):
xlim = kw['xlim'] if test(kw,'xlim') else None;
ylim = kw['ylim'] if test(kw,'ylim') else None;
x_spacing = kw['x_spacing'] if test(kw,'x_spacing') else 100;
y_spacing = kw['y_spacing'] if test(kw,'y_spacing') else 100;
xlabel = kw['xlabel'] if test(kw,'xlabel') else xname;
ylabel = kw['ylabel'] if test(kw,'ylabel') else yname;
x = d[xname];
y = d[yname];
F = kw['F'] if test(kw,'F') else 1.0;
good = np.ones(len(x)).astype(bool);
if xlim and not test(kw,'x_no_restrict'):
good &= (x >= xlim[0]) & (x <= xlim[1]);
else:
xlim = (x.min(), x.max());
if ylim and not test(kw,'y_no_restrict'):
good &= (y >= ylim[0]) & (y <= ylim[1]);
else:
ylim = (y.min(), y.max());
s =-d['q'][good]*1e6*F;
x = x[good];
y = y[good];
maxQ = kw['maxQ'] if test(kw,'maxQ') else None;
x_bins = np.linspace(xlim[0],xlim[1],x_spacing+1);
y_bins = np.linspace(ylim[0],ylim[1],y_spacing+1);
X,Y = np.mgrid[ xlim[0] : xlim[1] : x_spacing*1j,
ylim[0] : ylim[1] : y_spacing*1j];
S,_,_ = np.histogram2d(x,y,bins=(x_bins,y_bins),weights=s);
if test(kw,'normalize'):
S /= np.abs(xlim[1]-xlim[0])/x_spacing;
S /= np.abs(ylim[1]-ylim[0])/y_spacing;
fig = kw['fig'] if test(kw,'fig') else plt.figure(1);
ax = kw['ax'] if test(kw,'ax') else plt.subplot();
plt.xlim(xlim);
plt.ylim(ylim);
plt.xlabel(xlabel);
plt.ylabel(ylabel);
surf=ax.pcolormesh(X,Y,S,cmap=pastel,vmax=maxQ);
c=fig.colorbar(surf,pad=0.075);
if test(kw,'clabel'):
c.set_label(kw['clabel']);
if test(kw,'title'):
ax.set_title(kw['title'],fontdict={'size':28});
pass;
def main():
opts = docopt(__doc__,help=True);
inname = opts['<input>'];
outname = opts['<output>'];
xname = opts['<x>'];
yname = opts['<y>'];
with open(inname,'r') as f:
d = pickle.load(f);
kw = {
'xlim': opts['--xlim'],
'ylim': opts['--ylim'],
'xlabel': opts['--x-label'],
'ylabel': opts['--y-label'],
'x_spacing': float(opts['--x-bins']),
'y_spacing': float(opts['--y-bins']),
'F': float(opts['--factor']),
'maxQ': float(opts['--max-Q']) if opts['--max-Q'] else None,
'clabel': opts['--clabel'],
'normalize': opts['--normalize'],
'fig': None,
'ax': None
};
pextrect(d,xname,yname,**kw);
if opts['--title']:
plt.title(opts['--title'],loc='left',fontdict={'fontsize':28});
if outname:
if opts['--high-res']:
plt.savefig(outname,dpi=1000);
else:
plt.savefig(outname);
else:
plt.show();
pass;
if __name__ == "__main__":
main();
| mit |
RoyBoy432/Emergence-Senescence | figure_code/MacroecologyPatterns/MetabolicScaling.py | 8 | 3155 | from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import scipy as sc
from scipy import stats
import statsmodels.stats.api as sms
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import summary_table
def xfrm(X, _max): return -np.log10(_max - np.array(X))
def figplot(x, y, xlab, ylab, fig, n, binned = 1):
'''main figure plotting function'''
fig.add_subplot(3, 3, n)
x = np.log10(x)
y = np.log10(y)
y2 = list(y)
x2 = list(x)
if binned == 1:
X, Y = (np.array(t) for t in zip(*sorted(zip(x2, y2))))
Xi = xfrm(X, max(X)*1.05)
bins = np.linspace(np.min(Xi), np.max(Xi)+1, 100)
ii = np.digitize(Xi, bins)
y2 = np.array([np.mean(Y[ii==i]) for i in range(1, len(bins)) if len(Y[ii==i]) > 0])
x2 = np.array([np.mean(X[ii==i]) for i in range(1, len(bins)) if len(X[ii==i]) > 0])
d = pd.DataFrame({'size': list(x2)})
d['rate'] = list(y2)
f = smf.ols('rate ~ size', d).fit()
coef = f.params[1]
st, data, ss2 = summary_table(f, alpha=0.05)
fitted = data[:,2]
mean_ci_low, mean_ci_upp = data[:,4:6].T
ci_low, ci_upp = data[:,6:8].T
x2, y2, fitted, ci_low, ci_upp = zip(*sorted(zip(x2, y2, fitted, ci_low, ci_upp)))
plt.scatter(x2, y2, color = 'SkyBlue', alpha= 1 , s = 12, linewidths=0.5, edgecolor='Steelblue')
plt.fill_between(x2, ci_upp, ci_low, color='b', lw=0.1, alpha=0.15)
plt.plot(x2, fitted, color='b', ls='--', lw=1.0, alpha=0.9)
plt.xlabel(xlab, fontsize=10)
plt.ylabel(ylab, fontsize=10)
plt.tick_params(axis='both', labelsize=6)
plt.xlim(0.9*min(x2), 1.1*max(x2))
plt.ylim(min(ci_low), max(ci_upp))
plt.title('$z$ = '+str(round(coef, 2)), fontsize=10)
return fig
mydir = os.path.expanduser('~/GitHub/Emergence')
tools = os.path.expanduser(mydir + "/tools")
df = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
df2 = pd.DataFrame({'length' : df['length'].groupby(df['sim']).mean()})
df2['NS'] = df['avg.pop.size'].groupby(df['sim']).mean()
state = 'all'
df2['Biomass'] = df[state+'.biomass'].groupby(df['sim']).mean()
df2['size'] = df[state+'.size'].groupby(df['sim']).mean()
df2['M'] = df[state+'.avg.per.capita.maint'].groupby(df['sim']).mean()
df2['MF'] = df[state+'.avg.per.capita.mf'].groupby(df['sim']).mean()
df2['B'] = (df2['M']*df2['MF']) * df2['size']
df2['MSB'] = df2['B']/df2['size']
df2['Pdens'] = (df2['Biomass'])/(df2['length']**2)
#df2 = df2[np.log10(df2['size']) > 1]
fig = plt.figure(figsize=(8, 7))
xlab = r"$log_{10}$"+'(Body size)'
ylab = r"$log_{10}$"+'(Metabolic rate)'
fig = figplot(df2['size'], df2['B'], xlab, ylab, fig, 1)
xlab = r"$log_{10}$"+'(Body size)'
ylab = r"$log_{10}$"+'(Mass specific rate)'
fig = figplot(df2['size'], df2['MSB'], xlab, ylab, fig, 2)
xlab = r"$log_{10}$"+'(Body size)'
ylab = r"$log_{10}$"+'(Pop. density)'
fig = figplot(df2['size'], df2['Pdens'], xlab, ylab, fig, 3)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/MTE.png', dpi=200, bbox_inches = "tight")
| gpl-3.0 |
fmgvalente/agent | modules/test.py | 1 | 1661 | import os
def create_execution_script(**options):
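"""Write the inputs into the working directory (datasets as data.npy/target.npy,
fitted models as svm.pkl) and return the shell command that runs this module on it."""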
import numpy as np
from sklearn.externals import joblib
inputs = options['input']
channels = options['channel']
for input_element, input_channel in zip(inputs,channels):
elem_data = input_element.data(input_channel)
if 'datatype' in elem_data:
if(elem_data['datatype'] == 'dataset'):
np.save(options['workdir']+"/data", elem_data['data'])
np.save(options['workdir']+"/target", elem_data['target'])
if(elem_data['datatype'] == 'model'):
joblib.dump(elem_data['model'], options['workdir']+"/svm.pkl")
else:
return "ERROR: input not expected"
script_string = "python3 "+ os.path.realpath(__file__) + " "+options['workdir']
return script_string
def data(workdir, channel, **options):
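"""Return the score saved in <workdir>/scores.npy by the __main__ block below."""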
import numpy as np
return {
'datatype':'test',
'score': np.load(workdir+'/scores.npy')
}
if __name__ == "__main__":
import sys
import numpy as np
from sklearn.externals import joblib
workdir = sys.argv[1]
data = np.load(workdir+"/data.npy")
target = np.load(workdir+"/target.npy")
classifier = joblib.load(workdir+"/svm.pkl")
scores = classifier.score(data, target)
np.save(workdir+'/scores', scores)
#num_folds = 10
#scoring_func = "f1"
#print ("Cross-validating", num_folds, "fold...")
#kfold = xval.StratifiedKFold(y=classifications, n_folds=num_folds)
#scores = xval.cross_val_score(estimator=pipe, X=features, y=classifications, cv=kfold, scoring=scoring_func, n_jobs=-1)
#np.save(scores)
| gpl-3.0 |
trungnt13/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
srowen/spark | python/pyspark/pandas/categorical.py | 15 | 5290 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import TYPE_CHECKING
import pandas as pd
from pandas.api.types import CategoricalDtype
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
class CategoricalAccessor(object):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
def __init__(self, series: "ps.Series"):
if not isinstance(series.dtype, CategoricalDtype):
raise ValueError("Cannot call CategoricalAccessor on type {}".format(series.dtype))
self._data = series
@property
def categories(self) -> pd.Index:
"""
The categories of this categorical.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.categories
Index(['a', 'b', 'c'], dtype='object')
"""
return self._data.dtype.categories
@categories.setter
def categories(self, categories: pd.Index) -> None:
raise NotImplementedError()
@property
def ordered(self) -> bool:
"""
Whether the categories have an ordered relationship.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.ordered
False
"""
return self._data.dtype.ordered
@property
def codes(self) -> "ps.Series":
"""
Return Series of codes as well as the index.
Examples
--------
>>> s = ps.Series(list("abbccc"), dtype="category")
>>> s # doctest: +SKIP
0 a
1 b
2 b
3 c
4 c
5 c
dtype: category
Categories (3, object): ['a', 'b', 'c']
>>> s.cat.codes
0 0
1 1
2 1
3 2
4 2
5 2
dtype: int8
"""
return self._data._with_new_scol(self._data.spark.column).rename()
def add_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_ordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def as_unordered(self, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_categories(self, removals: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def remove_unused_categories(self) -> "ps.Series":
raise NotImplementedError()
def rename_categories(self, new_categories: pd.Index, inplace: bool = False) -> "ps.Series":
raise NotImplementedError()
def reorder_categories(
self, new_categories: pd.Index, ordered: bool = None, inplace: bool = False
) -> "ps.Series":
raise NotImplementedError()
def set_categories(
self,
new_categories: pd.Index,
ordered: bool = None,
rename: bool = False,
inplace: bool = False,
) -> "ps.Series":
raise NotImplementedError()
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.categorical
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.categorical.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.categorical tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.categorical,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
zitouni/gnuradio-3.6.1 | gnuradio-core/src/examples/pfb/decimate.py | 17 | 5706 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = gr.firdes.low_pass_2(1, self._fs, 200, 150,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = gr.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = blks2.pfb_decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = gr.fir_filter_ccf(self._decim, self._taps)
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = gr.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
ChangLab/FAST-iCLIP | bin/oldscripts/fastclip_icountData.py | 2 | 65861 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os
import cmath
import math
import sys
import numpy as np
import glob
import subprocess
import re
from matplotlib_venn import venn2
import pandas as pd
from collections import defaultdict
from operator import itemgetter
import matplotlib as mpl
import matplotlib.pyplot as plt
import shutil
from optparse import OptionParser
mpl.rcParams['savefig.dpi'] = 2 * mpl.rcParams['savefig.dpi']
# <codecell>
global sampleName
global outfilepath
global logFile
global logOpen
### File name ###
sampleName=sys.argv[1]
infilepath=os.getcwd() + '/' + 'rawdata/'
outfilepath=os.getcwd() + '/results/%s/'%sampleName
# <codecell>
# Create log and start pipeline
logFile=outfilepath + "runLog"
logOpen=open(logFile, 'w')
# <codecell>
### Parameters ###
iCLIP3pBarcode='AGATCGGAAGAGCGGTTCAG' # Barcode sequence to trim from reads.
q=25 # Minimum quality score to keep during filtering.
p=80 # Percentage of bases that must have quality > q during filtering.
iCLIP5pBasesToTrim=9 # Number of bases to trim from the 5' end of CLIP reads.
k='1' # k=N distinct, valid alignments for each read in bt2 mapping.
threshold=2 # Summed RT-stop count (across both replicates) that a position must exceed to be kept.
expand=15 # Bases to expand around RT position after RT stops are merged.
repeat_index=os.getcwd() + '/docs/repeat/rep' # bt2 index for repeat RNA.
repeatGenomeBuild=os.getcwd()+'/docs/repeat/repeatRNA.fa' # Sequence of repeat index.
repeatAnnotation=os.getcwd()+'/docs/repeat/Hs_repeatIndex_positions.txt' # Repeat annotation file.
start18s=3657
end18s=5527
start5s=6623
end5s=6779
start28s=7935
end28s=12969
rRNAend=13314
threshold_rep=0 # RT stop threshold for repeat index.
index=os.getcwd() + '/docs/hg19/hg19' # bt2 index for mapping.
index_tag='hg19' # Name of bt2 index.
genomeFile=os.getcwd()+'/docs/human.hg19.genome' # Genome file for bedGraph, etc.
genomeForCLIPper='-shg19' # Parameter for CLIPper.
blacklistregions=os.getcwd()+'/docs/wgEncodeDukeMapabilityRegionsExcludable.bed' # Blacklist masker.
repeatregions=os.getcwd()+'/docs/repeat_masker.bed' # Repeat masker.
geneAnnot=glob.glob(os.getcwd()+'/docs/genes_types/*') # List of genes by type.
snoRNAmasker=os.getcwd()+'/docs/snoRNA_reference/snoRNAmasker_formatted_5pExtend.bed' # snoRNA masker file.
miRNAmasker=os.getcwd()+'/docs/miR_sort_clean.bed' # miRNA masker file.
fivePUTRBed=os.getcwd()+'/docs/5pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
threePUTRBed=os.getcwd()+'/docs/3pUTRs_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
cdsBed=os.getcwd()+'/docs/Exons_Ensbl_sort_clean_uniq.bed' # UTR annotation file.
utrFile=os.getcwd()+'/docs/hg19_ensembl_UTR_annotation.txt' # UTR annotation file.
genesFile=os.getcwd()+'/docs/hg19_ensembl_genes.txt' # Gene annotation file.
sizesFile=os.getcwd()+'/docs/hg19.sizes' # Genome sizes file.
snoRNAindex=os.getcwd()+'/docs/snoRNA_reference/sno_coordinates_hg19_formatted.bed' # snoRNA coordinate file.
CLIPPERoutNameDelim='_' # Delimiter for splitting the gene name in the CLIPper windows file.
# <codecell>
import datetime
now=datetime.datetime.now()
logOpen.write("Timestamp:%s\n"%str(now))
logOpen.write("\n###Parameters used###\n")
logOpen.write("3' barcode:%s\n"%iCLIP3pBarcode)
logOpen.write("Minimum quality score (q):%s\n"%q)
logOpen.write("Percentage of bases with > q:%s\n"%p)
logOpen.write("5' bases to trim:%s\n"%iCLIP5pBasesToTrim)
logOpen.write("k distinct, valid alignments for each read in bt2 mapping:%s\n"%k)
logOpen.write("Threshold for minimum number of RT stops:%s\n"%threshold)
logOpen.write("Bases for expansion around conserved RT stops:%s\n"%expand)
logOpen.write("\n\n\n")
# <codecell>
print "Processing sample %s" %(sampleName)
logOpen.write("Processing sample: "+sampleName+'\n')
read1=infilepath+sampleName+'_R1.fastq'
read2=infilepath+sampleName+'_R2.fastq'
unzippedreads=[read1,read2]
# <codecell>
def trimReads3p(unzippedreads,adapter3p):
# Usage: Trims a specified adapter sequence from the 3p end of the reads.
# Input: List of fastq files.
# Output: List of 3p trimmed files.
trimparam='-a'+adapter3p # Adapter string
trimmedReads=[]
try:
for inread in unzippedreads:
outread=inread.replace("rawdata/", "results/%s/"%sampleName)
outread=outread.replace(".fastq", "_3ptrimmed.fastq")
process=subprocess.Popen(['fastx_clipper',trimparam,'-n','-l33','-Q64','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
logOpen.write("Trim 3p end of reads.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
trimmedReads=trimmedReads+[outread]
return trimmedReads
except:
logOpen.write("Problem with 3p trimming.\n")
print "Problem with 3p trimming."
print "Trim 3p adapter from reads."
trimmedReads3p=trimReads3p(unzippedreads,iCLIP3pBarcode)
# <codecell>
def qualityFilter(trim3pReads,q,p):
# Usage: Filters reads based upon quality score.
# Input: List of fastq file names as well as the quality paramters p and q.
# Output: List of modified fastq file names.
qualityparam='-q'+str(q)
percentrageparam='-p'+str(p)
filteredReads=[]
try:
for inread in trim3pReads:
outread=inread.replace(".fastq", "_filter.fastq")
process=subprocess.Popen(['fastq_quality_filter',qualityparam,percentrageparam,'-Q64','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform quality filtering.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
filteredReads=filteredReads+[outread]
return filteredReads
except:
logOpen.write("Problem with quality filter.\n")
print "Problem with quality filter."
print "Perform quality filtering."
filteredReads=qualityFilter(trimmedReads3p,q,p)
# <codecell>
def dupRemoval(filteredReads):
# Usage: Removes duplicate reads.
# Input: List of fastq file names.
    # Output: List of de-duplicated read files in FASTQ format (fastx_collapser output converted back from FASTA).
program=os.getcwd() + '/bin/fasta_to_fastq.pl'
noDupes=[]
try:
for inread in filteredReads:
outread=inread.replace(".fastq","_nodupe.fasta")
process=subprocess.Popen(['fastx_collapser','-Q64','-i',inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform duplicate removal.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
fastqOut=outread.replace('.fasta', '.fastq') # fastx_collapser returns fasta files, which are then converted to fastq.
outfh=open(fastqOut, 'w')
process=subprocess.Popen(['perl',program,outread],stdout=outfh)
process.communicate() # Wait for the process to complete.
os.remove(outread) # Remove the remaining .fasta file.
noDupes=noDupes+[fastqOut]
return noDupes
except:
logOpen.write("Problem with duplicate removal.\n")
print "Problem with duplicate removal."
print "Perform duplicate removal."
nodupReads=dupRemoval(filteredReads)
# <codecell>
def trimReads5p(nodupes,n):
# Usage: Trims a specified number of bases from the 5' end of each read.
# Input: List of fastq files.
# Output: List of 5p trimmed files.
trimparam='-f'+str(n)
trimmedReads=[]
try:
for inread in nodupes:
outread=inread.replace(".fastq", "_5ptrimmed.fastq")
process=subprocess.Popen(['fastx_trimmer', trimparam, '-Q64', '-i', inread,'-o',outread],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout, stderr=process.communicate()
logOpen.write("Perform 5' barcode trimming.\n")
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
trimmedReads=trimmedReads+[outread]
return trimmedReads
except:
logOpen.write("Problem with 5' barcode trimming.\n")
print "Problem with 5' barcode trimming."
print "Perform 5' barcode trimming."
trimmedReads5p=trimReads5p(nodupReads,iCLIP5pBasesToTrim)
# <codecell>
def runBowtie(fastqFiles,index,index_tag):
# Usage: Read mapping to reference.
# Input: Fastq files of replicate trimmed read files.
# Output: Path to samfile for each read.
program='bowtie2'
mappedReads=[]
unMappedReads=[]
try:
for infastq in fastqFiles:
outfile=infastq.replace(".fastq","_mappedTo%s.sam"%index_tag)
unmapped=infastq.replace(".fastq","_notMappedTo%s.fastq"%index_tag)
process=subprocess.Popen([program,'-x',index,'-k',k,'-U',infastq,'--un',unmapped,'-S',outfile],stderr=subprocess.STDOUT,stdout=subprocess.PIPE)
stdout,stderr=process.communicate()
logOpen.write("Perform mapping to %s index.\n"%index_tag)
logOpen.write("Stdout: %s.\n"%stdout)
logOpen.write("Stderr: %s.\n"%stderr)
mappedReads = mappedReads + [outfile]
unMappedReads = unMappedReads + [unmapped]
return (mappedReads,unMappedReads)
except:
logOpen.write("Problem with mapping.\n")
print "Problem with mapping."
print "Run mapping to repeat index."
mappedReads_rep,unmappedReads_rep=runBowtie(trimmedReads5p,repeat_index,'repeat')
# <codecell>
def runSamtools(samfiles):
# Usage: Samfile processing.
# Input: Sam files from Bowtie mapping.
# Output: Sorted bedFiles.
program = 'samtools'
program2 = 'bamToBed'
outBedFiles=[]
try:
for samfile in samfiles:
bamfile = samfile.replace('.sam','.bam')
proc = subprocess.Popen( [program,'view','-bS','-o', bamfile, samfile])
proc.communicate()
bamfile_sort = bamfile.replace('.bam','_sorted')
proc2 = subprocess.Popen([program,'sort',bamfile, bamfile_sort])
proc2.communicate()
bedFile = bamfile_sort.replace('_sorted', '_withDupes.bed')
outfh = open(bedFile,'w')
proc3 = subprocess.Popen( [program2,'-i', bamfile_sort+'.bam'],stdout=outfh)
proc3.communicate()
outBedFiles=outBedFiles+[bedFile]
return outBedFiles
except:
logOpen.write("Problem with samtools.\n")
print "Problem with samtools."
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles_rep=runSamtools(mappedReads_rep)
# <codecell>
def seperateStrands(mappedReads):
    # Usage: Separate positive and negative strands.
# Input: Paths to two bed files from Samtools.
# Output: Paths to bed files isolated by strand.
negativeStrand=[]
positiveStrand=[]
for mapFile in mappedReads:
with open(mapFile, 'r') as infile:
neg_strand=mapFile.replace('.bed','_neg.bed')
pos_strand=mapFile.replace('.bed','_pos.bed')
neg = open(neg_strand, 'w')
pos = open(pos_strand, 'w')
negativeStrand=negativeStrand+[neg_strand]
positiveStrand=positiveStrand+[pos_strand]
for line in infile:
if str(line.strip().split('\t')[5]) == '-':
neg.write(line)
elif str(line.strip().split('\t')[5]) == '+':
pos.write(line)
return (negativeStrand,positiveStrand)
def modifyNegativeStrand(negativeStrandReads):
# Usage: For negative stranded reads, ensure 5' position (RT stop) is listed first.
    # Input: Bed file paths for all negative-stranded reads.
# Output: Paths to modified bed files.
negativeStrandEdit=[]
for negativeRead in negativeStrandReads:
neg_strand_edited=negativeRead.replace('_neg.bed','_negEdit.bed')
negativeStrandEdit=negativeStrandEdit+[neg_strand_edited]
neg_edit = open(neg_strand_edited, 'w')
with open(negativeRead, 'r') as infile:
for line in infile:
chrom,start,end,name,quality,strand=line.strip().split('\t')
neg_edit.write('\t'.join((chrom,end,str(int(end)+30),name,quality,strand))+'\n')
return negativeStrandEdit
def isolate5prime(strandedReads):
# Usage: Isolate only the Chr, 5' position (RT stop), and strand.
    # Input: Bed file paths to strand-separated reads.
    # Output: Paths to RT stop files.
RTstops=[]
for reads in strandedReads:
RTstop=reads.replace('.bed','_RTstop.bed')
f = open(RTstop, 'w')
with open(reads, 'r') as infile:
RTstops=RTstops+[RTstop]
for line in infile:
chrom,start,end,name,quality,strand=line.strip().split('\t')
f.write('\t'.join((chrom,start,strand))+'\n')
return RTstops
print "RT stop isolation (repeat)."
logOpen.write("RT stop isolation (repeat).\n")
readsByStrand_rep=seperateStrands(mappedBedFiles_rep)
negativeRTstop_rep=isolate5prime(modifyNegativeStrand(readsByStrand_rep[0]))
positiveRTstop_rep=isolate5prime(readsByStrand_rep[1])
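# Illustrative sketch (not called by the pipeline): for a minus-strand BED record,
# the script treats the BED 'end' coordinate as the 5' position (the RT stop), which
# is why modifyNegativeStrand() rewrites each interval to begin at 'end'. The single
# BED line below is hypothetical.
def _example_negative_strand_RT_stop():
    bed_line = 'chr1\t100\t130\tCLIPread\t255\t-'
    chrom, start, end, name, quality, strand = bed_line.strip().split('\t')
    # Rewritten as in modifyNegativeStrand(): the new interval starts at the old 'end'.
    rewritten = '\t'.join((chrom, end, str(int(end) + 30), name, quality, strand))
    # isolate5prime() would then keep (chrom, start-of-rewritten-interval, strand) = ('chr1', '130', '-').
    return rewritten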
# <codecell>
def fileCat(destinationFile,fileList):
f = open(destinationFile, "w")
for tempfile in fileList:
readfile = open(tempfile, "r")
f.write(readfile.read())
readfile.close()
f.close()
def RTcounts(RTfile):
posRT_R1=pd.DataFrame(pd.read_table(RTfile,index_col=None,header=None,sep='\t'))
posRT_R1.columns=['Chr','Start','Strand']
cts=posRT_R1.groupby(['Chr','Start']).size()
return cts
def mergeRT(RTstopFiles,outfilename,threshold,expand,strand):
# Usage: Merge RT stops between replicates and keep only those positions that exceed threshold.
# Input: Files with RT stops for each replicate, outfile, threshold, strand, and bases to expand around RT stop.
# Output: None. Writes merged RT stop file.
cts_R1=RTcounts(RTstopFiles[0])
cts_R2=RTcounts(RTstopFiles[1])
m=pd.concat([cts_R1,cts_R2],axis=1,join='inner')
m.columns=['Rep_1','Rep_2']
m['Sum']=m['Rep_1']+m['Rep_2']
m_filter=m[m['Sum']>threshold]
f = open(outfilename, 'w')
for i in m_filter.index:
chrom=i[0]
RT=i[1]
count=m_filter.loc[i,'Sum']
if RT > expand:
read='\t'.join((chrom,str(int(RT)-expand),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
else:
read='\t'.join((chrom,str(int(RT)),str(int(RT)+expand),'CLIPread','255',strand))+'\n'
f.write(read*(count))
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_repeat_positivereads.mergedRT'
strand='+'
mergeRT(positiveRTstop_rep,posMerged,threshold_rep,expand,strand)
negMerged=outfilepath+sampleName+'_repeat_negativereads.mergedRT'
strand='-'
mergeRT(negativeRTstop_rep,negMerged,threshold_rep,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold_rep+'_repeat_allreads.mergedRT.bed'
fileCat(negAndPosMerged,[posMerged,negMerged])
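# Illustrative sketch (not called by the pipeline): mergeRT() counts identical
# (Chr, Start) RT stops in each replicate, keeps only positions whose summed count
# exceeds 'threshold', and writes each kept position expanded by +/- 'expand' bases,
# once per supporting read. The toy replicate data below are hypothetical.
def _example_mergeRT_logic(threshold=2):
    import pandas as pd
    rep1 = pd.DataFrame({'Chr': ['chr1', 'chr1', 'chr1'], 'Start': [1000, 1000, 2000]})
    rep2 = pd.DataFrame({'Chr': ['chr1', 'chr1', 'chr1', 'chr1'], 'Start': [1000, 1000, 1000, 2000]})
    cts_R1 = rep1.groupby(['Chr', 'Start']).size()
    cts_R2 = rep2.groupby(['Chr', 'Start']).size()
    m = pd.concat([cts_R1, cts_R2], axis=1, join='inner')
    m.columns = ['Rep_1', 'Rep_2']
    m['Sum'] = m['Rep_1'] + m['Rep_2']
    # With expand=15, only chr1:1000 (Sum = 5) exceeds the threshold of 2; it would be
    # written as chr1 985-1015, repeated five times. chr1:2000 (Sum = 2) is dropped.
    return m[m['Sum'] > threshold]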
# <codecell>
print "Run mapping to %s."%index_tag
mappedReads,unmappedReads=runBowtie(unmappedReads_rep,index,index_tag)
# <codecell>
print "Run samtools."
logOpen.write("Run samtools.\n")
mappedBedFiles=runSamtools(mappedReads)
# <codecell>
def runRepeatMask(mappedReads,repeatregions):
# Usage: Remove repeat regions from bedfile following mapping.
    # Input: .bed file after mapping (duplicates removed by samtools) and blacklist regions removed.
# Output: Bedfile with repeat regions removed.
program='intersectBed'
masked=[]
try:
for bedIn in mappedReads:
noRepeat=bedIn.replace('.bed','_noRepeat.bed')
outfh=open(noRepeat, 'w')
proc=subprocess.Popen([program,'-a',bedIn,'-b',repeatregions,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
masked=masked+[noRepeat]
return (masked)
except:
print "Problem with repeat masking."
logOpen.write("Problem with repeat masking.\n")
def runBlacklistRegions(mappedReads,blacklistregions):
# Usage: Remove blacklisted regions from bedfile following mapping.
# Input: .bed file after mapping (duplicates removed by samtools).
# Output: Bedfile with blacklisted regions removed.
program='intersectBed'
blackListed=[]
try:
for bedIn in mappedReads:
noBlacklist=bedIn.replace('.bed','_noBlacklist.bed')
outfh=open(noBlacklist, 'w')
proc=subprocess.Popen([program,'-a',bedIn,'-b',blacklistregions,'-v'],stdout=outfh)
proc.communicate()
outfh.close()
blackListed=blackListed+[noBlacklist]
return (blackListed)
except:
print "Problem with blacklist."
logOpen.write("Problem with blacklist.\n")
print "Run repeat and blacklist region masker."
logOpen.write("Run repeat and blacklist masker.\n")
blacklistedBedFiles=runBlacklistRegions(mappedBedFiles,blacklistregions)
maskedBedFiles=runRepeatMask(blacklistedBedFiles,repeatregions)
# <codecell>
print "RT stop isolation."
logOpen.write("RT stop isolation.\n")
readsByStrand=seperateStrands(maskedBedFiles)
negativeRTstop=isolate5prime(modifyNegativeStrand(readsByStrand[0]))
positiveRTstop=isolate5prime(readsByStrand[1])
print "Merge RT stops."
logOpen.write("Merge RT stops.\n")
posMerged=outfilepath+sampleName+'_%s_positivereads.mergedRT'%index_tag
strand='+'
mergeRT(positiveRTstop,posMerged,threshold,expand,strand)
negMerged=outfilepath+sampleName+'_%s_negativereads.mergedRT'%index_tag
strand='-'
mergeRT(negativeRTstop,negMerged,threshold,expand,strand)
negAndPosMerged=outfilepath+sampleName+'_threshold=%s'%threshold+'_%s_allreads.mergedRT.bed'%index_tag
fileCat(negAndPosMerged,[posMerged,negMerged])
# <codecell>
def runCLIPPER(RTclusterfile,genome,genomeFile):
    # Usage: Process the mergedRT file and pass through the CLIPper FDR script.
# Input: Merged RT file.
# Output: CLIPper input (.bed) file and output file.
program='bedToBam'
program2='samtools'
program3='bamToBed'
program4='clipper'
bamfile=RTclusterfile.replace('.bed','.bam')
outfh=open(bamfile, 'w')
proc=subprocess.Popen([program,'-i',RTclusterfile,'-g',genomeFile],stdout=outfh)
proc.communicate()
bamfile_sort=bamfile.replace('.bam','.srt')
proc2=subprocess.Popen([program2,'sort',bamfile,bamfile_sort])
proc2.communicate()
bamfile_sorted=bamfile_sort+'.bam'
mapStats=bamfile_sorted.replace('.srt.bam','.mapStats.txt')
outfh=open(mapStats, 'w')
proc3=subprocess.Popen([program2,'flagstat',bamfile_sorted],stdout=outfh)
proc3.communicate()
proc4=subprocess.Popen([program2,'index',bamfile_sorted])
proc4.communicate()
CLIPPERin=bamfile_sorted.replace('.srt.bam','_CLIPPERin.bed')
outfh=open(CLIPPERin, 'w')
proc5=subprocess.Popen([program3,'-i',bamfile_sorted],stdout=outfh)
proc5.communicate()
CLIPPERout=CLIPPERin.replace('_CLIPPERin.bed','_CLIP_clusters')
proc6=subprocess.Popen([program4,'--bam',bamfile_sorted,genome,'--outfile=%s'%CLIPPERout],)
proc6.communicate()
outfh.close()
return (CLIPPERin,CLIPPERout)
def makeGeneNameDict(fi):
# Usage: Make a dictionary that maps RT stop to gene name.
# Input: File path to intersected CLIPper windows and input RT stop coordinates.
    # Output: Dictionary mapping RT stop to gene name.
nameDict={}
with open(fi, 'r') as infile:
for read in infile:
elementList=read.strip().split('\t')
RT_id='_'.join((elementList[0],elementList[1],elementList[2],elementList[5]))
if RT_id not in nameDict:
geneName=elementList[9].strip().split(CLIPPERoutNameDelim)[0]
nameDict[RT_id]=geneName
return nameDict
def modCLIPPERout(CLIPPERin,CLIPPERout):
# Usage: Process the CLIPper output and isolate lowFDR reads based upon CLIPper windows.
# Input: .bed file passed into CLIPper and the CLIPper windows file.
    # Output: Low FDR reads recovered using the CLIPper windows file, genes per cluster, gene list of CLIPper clusters, and CLIPper windows as .bed.
program='intersectBed'
CLIPperOutBed=CLIPPERout+'.bed'
CLIPpeReadsPerCluster=CLIPPERout+'.readsPerCluster'
CLIPpeGeneList=CLIPPERout+'.geneNames'
f = open(CLIPperOutBed,'w')
g = open(CLIPpeReadsPerCluster,'w')
h = open(CLIPpeGeneList,'w')
with open(CLIPPERout,'r') as infile:
for line in infile:
try:
                # Note that different versions of CLIPper will report the gene name differently. So, we must handle this.
chrom,start,end,name,stats,strand,start_2,end_2 = line.strip().split('\t')
if CLIPPERoutNameDelim=='_':
readPerCluster=name.strip().split(CLIPPERoutNameDelim)[2]
else:
readPerCluster=(name.strip().split(CLIPPERoutNameDelim)[1]).split('_')[2]
geneName=name.strip().split(CLIPPERoutNameDelim)[0]
f.write('\t'.join((chrom,start,end,name,stats,strand))+'\n')
g.write((readPerCluster+'\n'))
h.write((geneName+'\n'))
except:
print ""
f.close()
g.close()
h.close()
    # Intersect input reads with the CLIPper windows, report the full result for both, include strand, and do not duplicate reads from -a if they intersect with multiple windows.
clusterWindowInt=CLIPperOutBed.replace('.bed','_fullClusterWindow.bed')
outfh=open(clusterWindowInt,'w')
proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-wb','-s'],stdout=outfh)
proc.communicate()
outfh.close()
# Use the full window intersection to make a dictionary mapping RTstop to gene name.
nameDict=makeGeneNameDict(clusterWindowInt)
# Intersect input reads with CLIPper windows, but only report one intersection per read (as reads can overlap with multiple windows).
clusterWindowIntUniq=CLIPperOutBed.replace('.bed','_oneIntPerRead.bed')
outfh=open(clusterWindowIntUniq,'w')
proc=subprocess.Popen([program,'-a',CLIPPERin,'-b',CLIPperOutBed,'-wa','-s','-u'],stdout=outfh)
proc.communicate()
outfh.close()
    # Process the uniquely intersected RT stops by adding the gene name.
CLIPPERlowFDR=CLIPperOutBed.replace('.bed','_lowFDRreads.bed')
outfh=open(CLIPPERlowFDR,'w')
with open(clusterWindowIntUniq, 'r') as infile:
for read in infile:
bed=read.strip().split('\t')
RT_id='_'.join((bed[0],bed[1],bed[2],bed[5]))
geneName=nameDict[RT_id]
outfh.write('\t'.join((bed[0],bed[1],bed[2],geneName,bed[4],bed[5],'\n')))
outfh.close()
infile.close()
return (CLIPPERlowFDR,CLIPpeReadsPerCluster,CLIPpeGeneList,CLIPperOutBed)
print "Run CLIPper."
logOpen.write("Run CLIPper.\n")
CLIPPERio=runCLIPPER(negAndPosMerged,genomeForCLIPper,genomeFile)
CLIPPERin=CLIPPERio[0]
CLIPPERout=CLIPPERio[1]
clipperStats=modCLIPPERout(CLIPPERin,CLIPPERout)
CLIPPERlowFDR=clipperStats[0] # Low FDR reads filtered through CLIPper windows
CLIPpeReadsPerCluster=clipperStats[1] # Number of reads per CLIPper cluster
CLIPpeGeneList=clipperStats[2] # Gene names returned from the CLIPper file
CLIPperOutBed=clipperStats[3] # CLIPper windows as a bed file
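# Illustrative sketch (not called by the pipeline): makeGeneNameDict() keys each RT
# stop as 'chrom_start_end_strand' and maps it to the gene name parsed from the
# intersected CLIPper window name (the field before the first CLIPPERoutNameDelim).
# The intersectBed output row below, including the window name, is hypothetical.
def _example_RT_to_gene_name():
    fields = ['chr1', '1000', '1030', 'CLIPread', '255', '+',
              'chr1', '900', '1100', 'ENSG00000123456_12_42', '3.1e-05', '+']
    RT_id = '_'.join((fields[0], fields[1], fields[2], fields[5]))
    geneName = fields[9].strip().split(CLIPPERoutNameDelim)[0]
    return {RT_id: geneName}  # {'chr1_1000_1030_+': 'ENSG00000123456'}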
# <codecell>
def getBedCenterPoints(inBed):
    # Usage: Obtain the center coordinates of a bedFile.
    # Input: BedFile.
    # Output: Center coordinates returned.
outBed=inBed.replace('.bed','_centerCoord.bed')
f=open(outBed, 'w')
with open(inBed, 'r') as infile:
for line in infile:
elementList=line.strip().split('\t')
f.write('\t'.join((elementList[0],str(int(elementList[1])+expand),str(int(elementList[1])+expand+1),elementList[3],elementList[4],elementList[5],'\n')))
f.close()
return outBed
def cleanBedFile(inBed):
# Usage: Sort and recover only first 6 fields from a bed file.
# Input: BedFile.
# Output: Sorted bedFile with correct number of fields.
program='sortBed'
CLIPperOutBed=inBed.replace('.bed','_cleaned.bed')
sortedBed=CLIPperOutBed.replace('_cleaned.bed','_cleaned_sorted.bed')
f=open(CLIPperOutBed, 'w')
with open(inBed, 'r') as infile:
for line in infile:
elementList=line.strip().split('\t')
f.write('\t'.join((elementList[0],elementList[1],elementList[2],elementList[3],elementList[4],elementList[5],'\n')))
f.close()
outfh=open(sortedBed, 'w')
proc=subprocess.Popen([program, '-i', CLIPperOutBed],stdout=outfh)
proc.communicate()
outfh.close()
return sortedBed
def makeBedGraph(lowFDRreads,sizesFile):
# Usage: From a bedFile, generate a bedGraph and bigWig.
# Input: BedFile.
# Output: BedGraph file.
program='genomeCoverageBed'
program2=os.getcwd() + '/bin/bedGraphToBigWig'
cleanBed=cleanBedFile(lowFDRreads)
outname=cleanBed.replace('.bed','.bedgraph')
outname2=cleanBed.replace('.bed','.bw')
outfh=open(outname,'w')
proc=subprocess.Popen([program,'-bg','-split','-i',cleanBed,'-g',sizesFile],stdout=outfh)
proc.communicate()
outfh2=open(outname2,'w')
proc2=subprocess.Popen([program2,outname,sizesFile,outname2],stdout=subprocess.PIPE)
proc2.communicate()
return outname
print "Make bedGraph"
logOpen.write("Make bedGraph.\n")
bedGraphCLIPout=makeBedGraph(CLIPPERlowFDR,genomeFile)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
# <codecell>
def filterSnoRNAs(proteinCodingReads,snoRNAmasker,miRNAmasker):
# Usage: Filter snoRNA and miRNAs from protein coding reads.
# Input: .bed file with protein coding reads.
# Output: snoRNA and miR filtered .bed file.
program='intersectBed'
proteinWithoutsnoRNAs=proteinCodingReads.replace('.bed','_snoRNAremoved.bed')
proteinWithoutmiRNAs=proteinWithoutsnoRNAs.replace('.bed','_miRNAremoved.bed')
outfh=open(proteinWithoutsnoRNAs, 'w')
proc=subprocess.Popen([program,'-a',proteinCodingReads,'-b',snoRNAmasker,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
outfh=open(proteinWithoutmiRNAs, 'w')
proc=subprocess.Popen([program,'-a',proteinWithoutsnoRNAs,'-b',miRNAmasker,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
return (proteinWithoutmiRNAs)
def getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists):
# Usage: Given a list of genes, return all reads for the associated genes.
# Input: Gene list and the path to lowFDR read file.
    # Output: List of reads associated with the given genes.
lowFDRgenelist=[]
for path in pathToGeneLists:
outfile=path+'_LowFDRreads.bed'
proc=subprocess.Popen('grep -F -f %s %s > %s'%(path,CLIPPERlowFDR,outfile),shell=True)
proc.communicate()
return_code=proc.wait() # *** Remove later. ***
lowFDRgenelist=lowFDRgenelist+[outfile]
return lowFDRgenelist
def compareLists(list1,list2,outname):
# Usage: Compare gene lists and output matches to the file.
# Input: Two gene lists.
# Output: Path file containing the matching genes.
f=open(list1,'r')
g=open(list2,'r')
commonGenes=set(f.readlines()) & set(g.readlines())
geneCategory=outname.split('.')[1]
outputName=outfilepath+'clipGenes_'+geneCategory
outfh=open(outputName,'w')
for gene in commonGenes:
outfh.write(gene)
outfh.close()
return outputName
def getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot):
# Usage: Get all genes listed under each type, compare to CLIPper targets.
    # Input: CLIPper gene list and the per-type gene annotation files.
# Output: Path to file containing all CLIPper genes of each type.
geneTypes=[]
for genepath in geneAnnot:
lowFDRgenes=compareLists(CLIPpeGeneList,genepath,os.path.split(genepath)[1])
geneTypes=geneTypes+[lowFDRgenes]
return geneTypes
print "Partition reads by type."
logOpen.write("Partition reads by type.\n")
pathToGeneLists=getLowFDRGeneTypes(CLIPpeGeneList,geneAnnot)
pathToReadLists=getLowFDRReadTypes(CLIPPERlowFDR,pathToGeneLists)
proteinCodingReads=outfilepath+'clipGenes_proteinCoding_LowFDRreads.bed'
proteinBedGraph=makeBedGraph(proteinCodingReads,genomeFile)
filteredProteinCodingCenters=filterSnoRNAs(getBedCenterPoints(proteinCodingReads),snoRNAmasker,miRNAmasker)
filteredProteinCentersBedGraph=makeBedGraph(filteredProteinCodingCenters,genomeFile)
lincRNAReads=outfilepath+'clipGenes_lincRNA_LowFDRreads.bed'
filteredLincRNACenters=filterSnoRNAs(getBedCenterPoints(lincRNAReads),snoRNAmasker,miRNAmasker)
# <codecell>
# --- #
# <codecell>
def sortFilteredBed(bedFile):
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
geneCounts=countHitsPerGene(bf)
return geneCounts
def countHitsPerGene(bf):
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
return geneCounts
def getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex):
program='intersectBed'
bedFile=outfilepath+'clipGenes_snoRNA_LowFDRreads.bed'
outfh=open(bedFile, 'w')
proc=subprocess.Popen([program,'-a',CLIPPERlowFDRcenters,'-b',snoRNAindex,'-s','-wa','-wb'],stdout=outfh)
proc.communicate()
outfh.close()
return bedFile
def countSnoRNAs(bedFile_sno):
bf=pd.DataFrame(pd.read_table(bedFile_sno,header=None))
bf.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
geneCounts=bf.groupby('name_snoRNA').size()
geneCounts.sort(ascending=False)
return geneCounts
def countRemainingGeneTypes(remaining):
for bedFile in remaining:
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
# *** THIS MAY DEPEND UPON THE VERSION OF CLIPPER USED ***
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
geneCounts=bf.groupby('geneName').size()
geneCounts.sort(ascending=False)
head,fname=os.path.split(bedFile)
geneType=fname.split("_")[1]
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_%s'%geneType
geneCounts.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "Generate sorted gene lists by gene type."
logOpen.write("Generate sorted gene lists by gene type.\n")
bedFile_pc=outfilepath+"clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_pc=sortFilteredBed(bedFile_pc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_proteinCoding'
geneCounts_pc.to_csv(outfilepathToSave)
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
geneCounts_linc=sortFilteredBed(bedFile_linc)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_lincRNA'
geneCounts_linc.to_csv(outfilepathToSave)
CLIPPERlowFDRcenters=getBedCenterPoints(CLIPPERlowFDR)
allLowFDRCentersBedGraph=makeBedGraph(CLIPPERlowFDRcenters,genomeFile)
bedFile_sno=getSnoRNAreads(CLIPPERlowFDRcenters,snoRNAindex)
geneCounts_sno=countSnoRNAs(bedFile_sno)
outfilepathToSave=outfilepath + '/PlotData_ReadsPerGene_snoRNA'
geneCounts_sno.to_csv(outfilepathToSave)
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
countRemainingGeneTypes(remaining)
# <codecell>
def makeClusterCenter(windowsFile):
# Usage: Generate a file of cluster centers.
# Input: Raw CLIPper output file.
# Output: File with coordinates for the center of each CLIPper cluster.
cleanBed = cleanBedFile(windowsFile)
centers=cleanBed.replace('.bed','.clusterCenter')
f = open(centers, 'w')
with open(cleanBed, 'r') as infile:
for line in infile:
elementList = line.strip().split('\t')
diff=abs(int((int(elementList[1])-int(elementList[2]))/2))
f.write(elementList[0]+'\t'+str(int(elementList[1])+diff)+'\t'+str(int(elementList[1])+diff+1)+'\n')
f.close()
return centers
def getClusterIntensity(bedGraph,centerCoordinates):
    # Usage: Generate a matrix of read intensity values around CLIPper cluster centers.
# Input: BedGraph and cluster center file.
# Output: Generates a matrix, which is passed into R.
program=os.getcwd() + '/bin/grep_chip-seq_intensity.pl'
program2='wait'
proc=subprocess.Popen(['perl',program, centerCoordinates, bedGraph],)
proc.communicate()
logOpen.write("Waiting for Cluster Intensity file completion...\n")
proc2=subprocess.Popen(program2,shell=True)
proc2.communicate()
print "Get binding intensity around cluster centers."
logOpen.write("Get binding intensity around cluster centers.\n")
bedGraphCLIPin=makeBedGraph(CLIPPERin,genomeFile)
centerCoordinates=makeClusterCenter(CLIPperOutBed)
getClusterIntensity(bedGraphCLIPin,centerCoordinates)
# <codecell>
def partitionReadsByUTR(infile,UTRmask,utrReads,notutrReads):
program = 'intersectBed'
outfh = open(utrReads,'w')
proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-u','-s'],stdout=outfh)
proc.communicate()
outfh.close()
outfh = open(notutrReads,'w')
proc = subprocess.Popen([program,'-a',infile,'-b',UTRmask,'-v','-s'],stdout=outfh)
proc.communicate()
outfh.close()
def extractUTRs(bedIn,fivePUTRBed,threePUTRBed,cdsBed):
# Usage: Extract all UTR specific reads from the input file.
# Input: .bed file
# Output: Mutually exclusive partitions of the input file.
fivePreads = bedIn.replace('.bed', '_5p.bed')
notFivePreads = bedIn.replace('.bed', '_NOT5p.bed')
partitionReadsByUTR(bedIn,fivePUTRBed,fivePreads,notFivePreads)
threePreads = bedIn.replace('.bed', '_3p.bed')
notThreePreads = bedIn.replace('.bed', '_NOT3p.bed')
partitionReadsByUTR(notFivePreads,threePUTRBed,threePreads,notThreePreads)
CDSreads = bedIn.replace('.bed', '_cds.bed')
notCDSreads = bedIn.replace('.bed', '_NOTcds.bed')
partitionReadsByUTR(notThreePreads,cdsBed,CDSreads,notCDSreads)
return (fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads)
print "Intron and UTR analysis."
logOpen.write("Intron and UTR analysis.\n")
fivePreads,notFivePreads,CDSreads,notCDSreads,threePreads,notThreePreads=extractUTRs(filteredProteinCodingCenters,fivePUTRBed,threePUTRBed,cdsBed)
geneCounts_5p=sortFilteredBed(fivePreads)
geneCounts_3p=sortFilteredBed(threePreads)
geneCounts_cds=sortFilteredBed(CDSreads)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_5pUTR'
geneCounts_5p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_3pUTR'
geneCounts_3p.to_csv(outfilepathToSave)
outfilepathToSave=outfilepath+'/PlotData_ReadsPerGene_CDS'
geneCounts_cds.to_csv(outfilepathToSave)
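# Note: extractUTRs() above partitions reads sequentially (5'UTR first, then 3'UTR
# from the remainder, then CDS from that remainder), so the three per-region gene
# counts written here are mutually exclusive; reads left after all three partitions
# are treated as intronic by the downstream plotting step.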
# <codecell>
def makeTab(bedGraph,genesFile,sizesFile):
program = os.getcwd() + '/bin/bedGraph2tab.pl'
program2 = 'wait'
outfile=bedGraph.replace('.bedgraph','.tab')
proc = subprocess.Popen(['perl',program,genesFile,sizesFile,bedGraph,outfile],)
proc.communicate()
proc2 = subprocess.Popen(program2,shell=True)
proc2.communicate()
return outfile
def makeAvgGraph(bedGraph,utrFile,genesFile,sizesFile):
    # Usage: Generate a matrix of read intensity values across the gene body.
    # Input: BedGraph.
    # Output: Generates two matrices.
program= os.getcwd() + '/bin/averageGraph_scaled_tab.pl'
program2 = 'wait'
tabFile=makeTab(bedGraph,genesFile,sizesFile)
outhandle=tabFile.replace('.tab','_UTRs')
proc = subprocess.Popen(['perl',program,utrFile,tabFile,tabFile,outhandle],)
proc.communicate()
proc2 = subprocess.Popen(program2,shell=True)
proc2.communicate()
print "Gene body analysis."
logOpen.write("Gene body analysis.\n")
bedGraphProtein=makeBedGraph(bedFile_pc,genomeFile)
makeAvgGraph(bedGraphProtein,utrFile,genesFile,sizesFile)
# <codecell>
def getGeneStartStop(bedFile,geneRef):
try:
bf=pd.DataFrame(pd.read_table(bedFile,header=None))
bf.columns=['Chr','Start','End','ReadName','Q','Strand','CLIPper_winChr','CLIPper_winStart','CLIPper_winEmd','CLIPper_winaName','CLIPper_winP','CLIPper_winStrand']
bf['geneName']=bf['CLIPper_winaName'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','End','Strand']]
outfilepathToSave=bedFile.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
except ValueError:
print "No reads in %s"%bedFile
print "ncRNA gene body analysis."
geneStartStopRepo=os.getcwd()+'/docs/all_genes.txt'
geneRef=pd.DataFrame(pd.read_table(geneStartStopRepo))
remaining=[f for f in glob.glob(outfilepath+"*_LowFDRreads.bed") if 'lincRNA' not in f and 'proteinCoding' not in f and 'snoRNA' not in f]
for bedFile in remaining:
st_stop=getGeneStartStop(bedFile,geneRef)
# lincRNA file processing
bedFile_linc=outfilepath+"clipGenes_lincRNA_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed"
bf=pd.DataFrame(pd.read_table(bedFile_linc,header=None))
bf.columns=['Chr','Start','Stop','CLIPper_name','Q','Strand']
bf['geneName']=bf['CLIPper_name'].apply(lambda x: x.split('_')[0])
merge=pd.merge(geneRef,bf,left_on='Ensembl Gene ID',right_on='geneName')
ncRNA_startStop=merge[['Ensembl Gene ID','Gene Start (bp)','Gene End (bp)','Start','Stop','Strand']]
outfilepathToSave=bedFile_linc.replace(".bed",".geneStartStop")
ncRNA_startStop.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repeat_genome_size=len(repeat_genome[1])
repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
return (repeat_genome_bases,repeatAnnotDF)
def readBed(path):
bedFile = pd.read_table(path,dtype=str,header=None)
bedFile.columns=['Index','Start','Stop','Name','QS','Strand']
bedFile['Start']=bedFile['Start'].astype(int)
return bedFile
print "Record repeat RNA."
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
repeatAnnotDF.set_index('Name',inplace=True,drop=False)
# Get merged data for repeat index.
repeatMerged=glob.glob(outfilepath+"*repeat_allreads.mergedRT.bed")
rep=pd.read_table(repeatMerged[0],dtype=str,header=None)
rep.columns=['Rep_index','Start','Stop','Read_name','Q','Strand']
rep['RT_stop']=rep['Start'].astype(int)+expand
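# The merged repeat reads were written as RT stop +/- 'expand' bases (see mergeRT),
# so adding 'expand' back to 'Start' recovers the original RT stop coordinate
# (except for stops within 'expand' bases of the start of the repeat index).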
for ix in repeatAnnotDF.index:
end=repeatAnnotDF.loc[ix,'IndexEnd']
repName=repeatAnnotDF.loc[ix,'Name']
gene_hits=rep[(rep['RT_stop']<int(repeatAnnotDF.loc[ix,'IndexEnd']))&(rep['RT_stop']>int(repeatAnnotDF.loc[ix,'IndexStart']))]
gene_hits['Repeat_End']=repeatAnnotDF.loc[ix,'IndexEnd']
gene_hits['Repeat_Start']=repeatAnnotDF.loc[ix,'IndexStart']
outfilepathToSave=outfilepath + '/PlotData_RepeatRNAreads_%s'%repName
gene_hits.to_csv(outfilepathToSave)
# <codecell>
def makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repeat_genome_size=len(repeat_genome[1])
repeatAnnotDF=pd.DataFrame(pd.read_table(repeatAnnotation,header=None))
repeatAnnotDF.columns=['Name','Length','IndexStart','IndexEnd']
repeatAnnotDF['End_for_extraction']=repeatAnnotDF['IndexEnd']+1 # Python list extraction is not end index inclusive; to extract sequence, use end + 1.
return (repeat_genome_bases,repeatAnnotDF)
repeat_genome_bases,repeatAnnotDF=makeRepeatAnnotation(repeatGenomeBuild,repeatAnnotation)
# <codecell>
def lineCount(filename):
i=0
with open(filename) as f:
for i,l in enumerate(f):
pass
return i+1
def plot_ReadAccounting(outfilepath,sampleName):
rawRead1=infilepath+sampleName+'_R1.fastq'
rawRead2=infilepath+sampleName+'_R2.fastq'
reads3pTrim=[outfilepath+sampleName+'_R1_3ptrimmed.fastq',outfilepath+sampleName+'_R2_3ptrimmed.fastq']
readsFilter=[outfilepath+sampleName+'_R1_3ptrimmed_filter.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter.fastq']
readsNoDupes=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe.fastq',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe.fastq']
readsMappedReapeat=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed',outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_mappedTorepeat_withDupes.bed']
readsMappedHg19=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
readsMappedBlacklist=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes.bed'%index_tag]
readsMappedRepeatMask=[outfilepath+sampleName+'_R1_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag,outfilepath+sampleName+'_R2_3ptrimmed_filter_nodupe_5ptrimmed_notMappedTorepeat_mappedTo%s_withDupes_noBlacklist_noRepeat.bed'%index_tag]
clipperIN=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIPPERin.bed'%(threshold,index_tag)
clipperOUT=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_lowFDRreads.bed'%(threshold,index_tag)
fileNames=['Raw (R1)','Raw (R2)','3p Trim (R1)','3p Trim (R2)','Filter (R1)','Filter (R2)','No dupes (R1)','No dupes (R2)','RepeatMapped (R1)','RepeatMapped (R2)','Hg19Mapped (R1)','Hg19Mapped (R2)','Blacklist (R1)','Blacklist (R2)','RepeatMask (R1)','RepeatMask (R2)','ClipperIn','ClipperOut']
filesToCount=[rawRead1,rawRead2,reads3pTrim[0],reads3pTrim[1],readsFilter[0],readsFilter[1],readsNoDupes[0],readsNoDupes[1],readsMappedReapeat[0],readsMappedReapeat[1],readsMappedHg19[0],readsMappedHg19[1],readsMappedBlacklist[0],readsMappedBlacklist[1],readsMappedRepeatMask[0],readsMappedRepeatMask[1],clipperIN,clipperOUT]
counts=[]
counter=0
for fileString in filesToCount:
temp=lineCount(fileString)
if counter < 8:
temp=temp/4 # Fastq files
counts=counts+[temp]
counter += 1
ind = np.arange(len(counts)) + 0.5
plt.barh(ind,list(reversed(np.log10(np.array(counts)))),align='center',color='blue')
plt.xlabel('log10(Counts per file)',fontsize=5)
locs,pltlabels = plt.xticks(fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=5)
plt.yticks(ind,list(reversed(fileNames)),fontsize=5)
plt.tick_params(axis='yticks',labelsize=5)
ax=plt.gca()
for line in ax.get_yticklines():
line.set_markersize(0)
plt.title('Read counts',fontsize=5)
readDF=pd.DataFrame()
readDF['File_name']=fileNames
readDF['Reads_per_file']=counts
outfilepathToSave=outfilepath + '/PlotData_ReadsPerPipeFile'
readDF.to_csv(outfilepathToSave)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
# <codecell>
def plot_BoundGeneTypes(outfilepath,sampleName):
record=pd.DataFrame()
# Exclude specific files (e.g., UTR-specific reads).
geneListToPlot=[f for f in glob.glob(outfilepath+'PlotData_ReadsPerGene_*') if '5pUTR' not in f and '3pUTR' not in f and 'CDS' not in f]
for boundGenes in geneListToPlot:
glist=pd.read_csv(boundGenes,header=None)
glist.columns=['GeneName','Count']
gName=boundGenes.split('_')[-1]
record.loc[gName,'genesBound']=glist.shape[0]
record.loc[gName,'totalReads']=glist['Count'].sum()
record.sort('genesBound',inplace=True)
outfilepathToSave=outfilepath + '/PlotData_ReadAndGeneCountsPerGenetype'
record.to_csv(outfilepathToSave)
ind = np.arange(record.shape[0]) + 0.5
plt.bar(ind,record['genesBound'],align='center',color='blue')
locs,pltlabels = plt.yticks(fontsize=5)
locs,pltlabels = plt.xticks(ind,record.index,fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=5)
plt.tick_params(axis='xticks',labelsize=5)
ax=plt.gca()
for line in ax.get_xticklines():
line.set_markersize(0)
plt.ylabel('Number of genes bound',fontsize=5)
plt.tick_params(axis='yticks',labelsize=5)
plt.title('Bound genes by class',fontsize=5)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
# <codecell>
def plot_ReadsPerCluster(outfilepath,sampleName):
readPerCluster=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters.readsPerCluster'%(threshold,index_tag)
clust=pd.DataFrame(pd.read_table(readPerCluster,header=None))
clust.columns=['ReadsPerCluster']
clust=clust['ReadsPerCluster']
interval=10
bins=range(min(clust)-10,max(clust)+10,interval)
hist,bins=np.histogram(clust,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center, hist,align='center',width=width)
locs,pltlabels = plt.yticks(fontsize=5)
locs,pltlabels = plt.xticks(center,center,fontsize=5)
plt.setp(pltlabels, rotation=90, fontsize=3.5)
plt.tick_params(axis='yticks',labelsize=5)
plt.xlabel('Reads per cluster (bin=%s)'%interval,fontsize=5)
plt.ylabel('Frequency (RT stop count)',fontsize=5)
plt.title('Reads per cluster',fontsize=5)
plt.xlim(0,100) # Make the histogram easy to view.
# plt.xlim(-interval,np.max(center)+interval)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
# <codecell>
def plot_ClusterSizes(outfilepath,sampleName):
clipClusters=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters'%(threshold,index_tag)
clust=pd.DataFrame(pd.read_table(clipClusters,header=None,skiprows=1))
clust.columns=['chr','start','end','name','score','strand','m1','m2']
clust['clusterSize']=clust['start']-clust['end']
clust['clusterSize']=clust['clusterSize'].apply(lambda x: math.fabs(x))
plt.boxplot(clust['clusterSize'])
plt.tick_params(axis='x',labelbottom='off')
ax=plt.gca()
for line in ax.get_xticklines():
line.set_markersize(0)
plt.ylabel('Cluster length (bases)',fontsize=5)
locs,pltlabels = plt.yticks(fontsize=5)
plt.title('Cluster size',fontsize=5)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
# <codecell>
def plot_clusterBindingIntensity(outfilepath,sampleName):
clusterCenterHeatmap=outfilepath+sampleName+'_threshold=%s_%s_allreads.mergedRT_CLIP_clusters_cleaned_sorted.clusterCenter_heatmap.txt'%(threshold,index_tag)
hmap=pd.DataFrame(pd.read_table(clusterCenterHeatmap,header=None,skiprows=1))
hmap_vals=hmap.ix[:,1:]
sums=hmap_vals.sum(axis=1)
hmap_vals=hmap_vals.loc[np.argsort(sums),:]
plt.ylim(0,hmap_vals.shape[0])
p=plt.pcolormesh(np.array(hmap_vals),cmap='Blues')
plt.tick_params(axis='x',labelbottom='off')
plt.xlabel('Cluster position',fontsize=5)
locs,pltlabels = plt.yticks(fontsize=5)
plt.ylabel('Cluster number',fontsize=5)
plt.title('Read distribution',fontsize=5)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
# <codecell>
def readUTRfile(path):
geneCounts=pd.read_csv(path,header=None)
geneCounts.columns=['Gene_name','Count']
return geneCounts
def plot_readsBymRNAregion(outfilepath,sampleName):
pc_5pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')['Count'].sum()
pc_3pReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')['Count'].sum()
pc_CDSReads=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')['Count'].sum()
non_intronic=pc_5pReads+pc_3pReads+pc_CDSReads
allProteinCoding=outfilepath +'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved.bed'
all_pc=pd.DataFrame(pd.read_table(allProteinCoding,header=None))
pc_allReads=all_pc.shape[0]
v=[float(pc_allReads-non_intronic)/pc_allReads,float(pc_5pReads)/pc_allReads,float(pc_CDSReads)/pc_allReads,float(pc_3pReads)/pc_allReads]
pie_wedges=ax.pie(v,labels=["Intronic","5p UTR","CDS","3pUTR"],labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
wedge.set_edgecolor('black')
wedge.set_lw(1)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
# <codecell>
fig1=plt.figure(1)
plt.subplot(2,3,1)
plot_ReadAccounting(outfilepath,sampleName)
plt.subplot(2,3,2)
plot_ReadsPerCluster(outfilepath,sampleName)
plt.subplot(2,3,3)
plot_ClusterSizes(outfilepath,sampleName)
plt.subplot(2,3,4)
plot_clusterBindingIntensity(outfilepath,sampleName)
ax=plt.subplot(2,3,5)
plot_readsBymRNAregion(outfilepath,sampleName)
plt.subplot(2,3,6)
plot_BoundGeneTypes(outfilepath,sampleName)
fig1.tight_layout()
fig1.savefig(outfilepath+'Figure1.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig1.savefig(outfilepath+'Figure1.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_mRNAgeneBodyDist(outfilepath,sampleName):
averageGraph=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_averageGraph.txt'
hmap=pd.DataFrame(pd.read_table(averageGraph,header=None,skiprows=1))
hmap=hmap.set_index(0)
avgTrace=hmap.loc['treat',:]
plt.plot(avgTrace,color='blue',linewidth='2')
plt.vlines(200,0,np.max(avgTrace),linestyles='dashed')
plt.vlines(400,0,np.max(avgTrace),linestyles='dashed')
plt.ylim(0,np.max(avgTrace))
plt.tick_params(axis='x',labelbottom='off')
plt.xlabel('mRNA gene body (5pUTR, CDS, 3pUTR)')
plt.ylabel('Read density')
plt.tick_params(axis='y',labelsize=5)
plt.title('CLIP signal across average mRNA transcript.',fontsize=5)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
# <codecell>
def convertENBLids(enst_name):
ensg_name=ensemblGeneAnnot.loc[enst_name,'name2']
return ensg_name
def getUTRbindingProfile(utr,hmap_m):
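    # The column ranges below assume the 600-column averageGraph matrix scales each
    # transcript to 200 bins per region (5'UTR = columns 1-200, CDS = 201-400,
    # 3'UTR = 401-600); a gene is called region-specific when its signal falls
    # exclusively within that region's bins.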
if utr=='5p':
ix=(hmap_m[range(201,601)].sum(axis=1)==0)&(hmap_m[range(1,201)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_5pUTR')
elif utr=='3p':
ix=(hmap_m[range(1,401)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_3pUTR')
else:
ix=(hmap_m[range(1,201)].sum(axis=1)==0)&(hmap_m[range(401,601)].sum(axis=1)==0)&(hmap_m[range(201,401)].sum(axis=1)>0)
screen=readUTRfile(outfilepath+'/PlotData_ReadsPerGene_CDS')
# Ensure all genes are also identified in pre-allocated gene lists.
hmap_m_utrSpec=hmap_m.ix[ix,:]
hmap_m_utrSpec_filter=pd.merge(hmap_m_utrSpec,screen,left_on='ENSG_ID',right_on='Gene_name',how='inner')
sums=hmap_m_utrSpec_filter[range(1,601)].sum(axis=1)
hmap_m_utrSpec_filter=hmap_m_utrSpec_filter.loc[np.argsort(sums),:]
return hmap_m_utrSpec_filter
def plot_geneBodyPartition(outfilepath,sampleName):
treatMatrix=outfilepath+'clipGenes_proteinCoding_LowFDRreads_centerCoord_snoRNAremoved_miRNAremoved_cleaned_sorted_UTRs_scaled_cds200_abt0_treatmatrix.txt'
hmap=pd.DataFrame(pd.read_table(treatMatrix,header=None,skiprows=1))
    # Ensure genes recovered from this analysis are independently identified using partitioning of the CLIPper cluster data.
hmap['ENSG_ID']=hmap.ix[:,0].apply(convertENBLids)
bound_pc = outfilepath+'clipGenes_proteinCoding'
pc_genes=pd.DataFrame(pd.read_table(bound_pc,header=None,))
pc_genes.columns=['ENSG_ID']
hmap_m=pd.merge(hmap,pc_genes,left_on='ENSG_ID',right_on='ENSG_ID',how='inner')
# Isolate intronic bound genes.
tosave=outfilepath+'PlotData_ExclusiveBound_Intronic'
intronicBoundGenes=list(set(pc_genes['ENSG_ID'])-set(hmap_m['ENSG_ID']))
np.savetxt(tosave,np.array(intronicBoundGenes),fmt="%s")
# UTR specific genes.
geneTypes=['5p','cds','3p']
depth=50
for i in range(0,3):
utrMatrix=getUTRbindingProfile(geneTypes[i],hmap_m)
tosave=outfilepath+'PlotData_ExclusiveBound_%s'%geneTypes[i]
np.savetxt(tosave,utrMatrix['ENSG_ID'],fmt="%s")
plt.subplot2grid((2,3),(1,i),colspan=1)
dataToPlot=utrMatrix[range(1,601)]
p=plt.pcolormesh(np.array(dataToPlot)[-depth:-1,:],cmap='Blues')
plt.title(geneTypes[i],fontsize=5)
plt.vlines(200,0,depth,linestyles='dashed')
plt.vlines(400,0,depth,linestyles='dashed')
plt.tick_params(axis='x',labelbottom='off')
plt.tick_params(axis='y',labelleft='off')
plt.ylim(0,depth)
plt.ylabel('Ranked genes (highest on bottom)',fontsize=5)
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.title('%s specific genes: %s'%(geneTypes[i],np.unique(utrMatrix['ENSG_ID']).shape[0]),fontsize=5)
ensemblGeneAnnot=pd.DataFrame(pd.read_table(genesFile))
ensemblGeneAnnot=ensemblGeneAnnot.set_index('name') # Make ENST the index
plot_geneBodyPartition(outfilepath,sampleName)
# <codecell>
fig2=plt.figure(2)
plt.subplot2grid((2,3),(0,0),colspan=3)
plot_mRNAgeneBodyDist(outfilepath,sampleName)
plot_geneBodyPartition(outfilepath,sampleName)
fig2.tight_layout()
fig2.savefig(outfilepath+'Figure2.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig2.savefig(outfilepath+'Figure2.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_repeatRNA(outfilepath,sampleName):
repeat_genome=np.genfromtxt(repeatGenomeBuild,dtype='string')
repeat_genome_bases=repeat_genome[1]
repFiles=glob.glob(outfilepath + '/PlotData_RepeatRNAreads_*')
repFiles=[repFile for repFile in repFiles if 'rDNA' not in repFile]
plotDim=math.ceil(math.sqrt(len(repFiles)))
i=0
for path in repFiles:
name=path.split('RepeatRNAreads_')[-1]
try:
# Read in each RT stop file
hits_per_rep=pd.read_csv(path)
RTpositions=hits_per_rep['RT_stop']
start=hits_per_rep.loc[0,'Repeat_Start']
end=hits_per_rep.loc[0,'Repeat_End']
# Histogram of RT stops across gene body
bins=range(start,end+2,1)
hist,bins=np.histogram(RTpositions,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
# Normalize
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
# Subplot
plt.subplot(plotDim,plotDim,i+1)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
plt.xlim(start,end)
# Record data
storageDF=pd.DataFrame()
sequence=repeat_genome_bases[start:end+1]
storageDF['Sequence']=pd.Series(list(sequence))
readsPerBase=np.array(list(hist))
readsPerBaseNorm=np.array(list(histPlot))
storageDF['RT_stops']=readsPerBase
storageDF['RT_stops_norm']=readsPerBaseNorm
outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
storageDF.to_csv(outfilepathToSave)
i+=1
except:
print "No reads for repeatRNA %s"%name
plt.tight_layout()
fig3=plt.figure(3)
plot_repeatRNA(outfilepath,sampleName)
fig3.tight_layout()
fig3.savefig(outfilepath+'Figure3.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig3.savefig(outfilepath+'Figure3.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def plot_rDNA(outfilepath,sampleName):
plt.subplot2grid((3,3),(0,0),colspan=3)
name='rDNA'
rDNA=glob.glob(outfilepath + 'PlotData_RepeatRNAreads_rDNA')
hits_per_rep=pd.read_csv(rDNA[0])
RTpositions=hits_per_rep['RT_stop']
start=hits_per_rep.loc[0,'Repeat_Start']
end=hits_per_rep.loc[0,'Repeat_End']
bins=range(start,end+2,1)
hist,bins=np.histogram(RTpositions,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1]+bins[1:])/2
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.title('RT stops for %s: %s'%(name,len(RTpositions)),fontsize=5)
plt.xlim(start,end)
# Record data
storageDF=pd.DataFrame()
sequence=repeat_genome_bases[start:end+1]
storageDF['Sequence']=pd.Series(list(sequence))
readsPerBase=np.array(list(hist))
readsPerBaseNorm=np.array(list(histPlot))
storageDF['RT_stops']=readsPerBase
storageDF['RT_stops_norm']=readsPerBaseNorm
outfilepathToSave=outfilepath +'/PlotData_RepeatRNAHist_%s'%name
storageDF.to_csv(outfilepathToSave)
# Features of rDNA with respect to start of the bowtie index (index=0)
rRNAstart=start
plt.axvspan(start18s+rRNAstart,end18s+rRNAstart,facecolor='g',alpha=0.5)
plt.axvspan(start5s+rRNAstart,end5s+rRNAstart,facecolor='r',alpha=0.5)
plt.axvspan(start28s+rRNAstart,end28s+rRNAstart,facecolor='b',alpha=0.5)
# Generate histogram for transcribed region
plt.subplot2grid((3,3),(1,0),colspan=3)
datarDNAOnly=RTpositions-start
bins=range((start-start),(end-start+2),1)
hist,bins=np.histogram(datarDNAOnly,bins=bins)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
histPlot=np.array(hist,dtype=float)
histPlot=np.array(histPlot/float(len(RTpositions)),dtype=float)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.45)
plt.tick_params(axis='x',labelsize=2.5)
plt.tick_params(axis='y',labelsize=2.5)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
plt.axvspan(start18s,end18s,facecolor='g',alpha=0.5)
plt.axvspan(start5s,end5s,facecolor='r',alpha=0.5)
plt.axvspan(start28s,end28s,facecolor='b',alpha=0.5)
plt.xlim(0,rRNAend)
# Individual regions
plt.subplot2grid((3,3),(2,0),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='green',alpha=0.75)
plt.xlim(start18s,end18s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.ylabel('Normalized RT stop / bin',fontsize=2.5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('18s Region',fontsize=5)
plt.subplot2grid((3,3),(2,1),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='red',alpha=0.75)
plt.xlim(start5s,end5s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('5.8s Region',fontsize=5)
plt.subplot2grid((3,3),(2,2),colspan=1)
plt.bar(center,histPlot,align='center',width=width,color='blue',alpha=0.75)
plt.xlim(start28s,end28s)
plt.xlabel('rRNA locus position (bin=1 base)',fontsize=5)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.title('28s Region',fontsize=5)
plt.tight_layout()
fig4=plt.figure(4)
plot_rDNA(outfilepath,sampleName)
fig4.tight_layout()
fig4.savefig(outfilepath+'Figure4.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig4.savefig(outfilepath+'Figure4.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getBindingFrac(type_specific):
# 5' position on the negative strand is snoRNA stop coordinate.
neg_data=type_specific[type_specific['strand_snoRNA']=='-']
neg_data['diff']=np.abs(neg_data['Stop_snoRNA']-neg_data['Start'])
neg_data['frac']=neg_data['diff']/(neg_data['Stop_snoRNA']-neg_data['Start_snoRNA'])
# 5' position on the positive strand is snoRNA start coordinate.
pos_data=type_specific[type_specific['strand_snoRNA']=='+']
pos_data['diff']=np.abs(pos_data['Start_snoRNA']-pos_data['Start'])
pos_data['frac']=pos_data['diff']/(pos_data['Stop_snoRNA']-pos_data['Start_snoRNA'])
DF_snoProfile=pd.concat([neg_data,pos_data])
return DF_snoProfile
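# Illustrative sketch (not part of the original pipeline): getBindingFrac maps
# each read's 5' position to a fractional position along the gene body
# (5' -> 3'). On the minus strand the 5' end corresponds to the annotated stop
# coordinate, so the distance is measured from the opposite end than on the
# plus strand. A scalar, hedged version of the same arithmetic:
def _binding_frac_sketch(read_start, gene_start, gene_stop, strand):
    length = float(gene_stop - gene_start)
    if strand == '-':
        return abs(gene_stop - read_start) / length
    return abs(read_start - gene_start) / length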
print "snoRNA gene body anaysis."
# logOpen.write("Gene body analysis.\n")
bf_sno=pd.read_table(outfilepath+"clipGenes_snoRNA_LowFDRreads.bed",header=None)
bf_sno.columns=['Chr','Start','End','CLIPper_name','Q','Strand','Chr_snoRNA','Start_snoRNA','Stop_snoRNA','name_snoRNA','Type','strand_snoRNA']
snoTypes=pd.DataFrame(bf_sno.groupby('Type').size())
snoTypes.columns=['Reads']
snoTypes['Fraction']=snoTypes['Reads']/snoTypes['Reads'].sum(axis=1)
outfilepathToSave=outfilepath+'/PlotData_readsPerSnoRNAType'
snoTypes.to_csv(outfilepathToSave)
snoTypesAndGenes=pd.DataFrame(bf_sno.groupby(['Type','name_snoRNA']).size())
snoTypesAndGenes.columns=['Count_per_gene']
outfilepathToSave=outfilepath+'/PlotData_geneStatsPerSnoRNAType'
snoTypesAndGenes.to_csv(outfilepathToSave)
fig5=plt.figure(5)
ax=plt.subplot(2,2,1)
pie_wedges=ax.pie(snoTypes['Fraction'],labels=snoTypes.index,labeldistance=1.1,autopct='%1.1f%%')
plt.rcParams['font.size']=5
for wedge in pie_wedges[0]:
wedge.set_edgecolor('black')
wedge.set_lw(1)
i=2
for sType in set(bf_sno['Type']):
type_specific=bf_sno[bf_sno['Type']==sType]
sno_profile=getBindingFrac(type_specific)
if sType=='C':
title="C/D_box"
elif sType=='H':
title="H/ACA_box"
else:
title="scaRNA"
outfilepathToSave=outfilepath+'/PlotData_snoRNAReadDist_%s'%sType
sno_profile.to_csv(outfilepathToSave)
plt.subplot(2,2,i)
bins=np.arange(0,1,0.01)
hist,bins=np.histogram(sno_profile['frac'],bins=bins)
hist=np.array(hist/float(sno_profile['frac'].shape[0]),dtype=float)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
plt.title('Binding profile for %s'%title,fontsize=5)
plt.xlim([0,1])
# Record data
storageDF=pd.DataFrame()
storageDF['bins']=pd.Series(bins)
storageDF['hist']=pd.Series(hist)
outfilepathToSave=outfilepath+'/PlotData_snoRNAhistogram_%s'%sType
storageDF.to_csv(outfilepathToSave)
i+=1
fig5.tight_layout()
fig5.savefig(outfilepath+'Figure5.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig5.savefig(outfilepath+'Figure5.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
def getncRNABindingFrac(type_specific):
    # 5' position on the negative strand is the gene stop coordinate.
neg_data=type_specific[type_specific['Strand']=='-']
neg_data['diff']=np.abs(neg_data['Gene End (bp)']-neg_data['RT_stop'])
neg_data['frac']=neg_data['diff']/(neg_data['Gene End (bp)']-neg_data['Gene Start (bp)'])
    # 5' position on the positive strand is the gene start coordinate.
pos_data=type_specific[type_specific['Strand']=='+']
pos_data['diff']=np.abs(pos_data['Gene Start (bp)']-pos_data['RT_stop'])
pos_data['frac']=pos_data['diff']/(pos_data['Gene End (bp)']-pos_data['Gene Start (bp)'])
DF_ncRNAProfile=pd.concat([neg_data,pos_data])
return DF_ncRNAProfile
print "ncRNA gene body anaysis."
st_stopFiles=glob.glob(outfilepath+"*.geneStartStop")
st_stopFiles=[f for f in st_stopFiles if 'rRNA' not in f]
fig6=plt.figure(6)
plotDim=math.ceil(math.sqrt(len(st_stopFiles)))
i=1
for st_file in st_stopFiles:
name=st_file.split('clipGenes_')[1].split('_LowFDRreads')[0]
tmp=pd.read_csv(st_file)
tmp['RT_stop']=tmp['Start']+expand
tmp_profile=getncRNABindingFrac(tmp)
plt.subplot(plotDim,plotDim,i)
bins=np.arange(0,1,0.01)
hist,bins=np.histogram(tmp_profile['frac'],bins=bins)
hist=np.array(hist/float(tmp_profile['frac'].shape[0]),dtype=float)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1] + bins[1:])/2
plt.bar(center,hist,align='center',width=width,color='blue',alpha=0.75)
plt.tick_params(axis='x',labelsize=5)
plt.tick_params(axis='y',labelsize=5)
plt.xlabel('Fraction of gene body (5p - 3p)',fontsize=5)
plt.title('Binding profile for %s'%name,fontsize=5)
i+=1
fig6.tight_layout()
fig6.savefig(outfilepath+'Figure6.png',format='png',bbox_inches='tight',dpi=150,pad_inches=0.5)
fig6.savefig(outfilepath+'Figure6.pdf',format='pdf',bbox_inches='tight',dpi=150,pad_inches=0.5)
# <codecell>
logOpen.close()
# <codecell>
| gpl-2.0 |
ankurankan/scikit-learn | sklearn/linear_model/least_angle.py | 4 | 48488 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
    return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # the equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i+1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
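# Illustrative sketch (not part of the original module): a minimal, hedged
# usage example of ``lars_path`` on a tiny synthetic problem, wrapped in a
# private helper so nothing executes at import time; numpy is already
# imported above as np.
def _lars_path_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = np.dot(X, np.array([1.5, 0., 0., -2., 0.])) + 0.01 * rng.randn(20)
    # method='lasso' follows the l1-penalized objective documented above;
    # the full path is returned by default.
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # coefs has shape (n_features, n_alphas + 1): column j holds the
    # coefficient vector at regularization level alphas[j].
    return alphas, active, coefs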
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X = check_array(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
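# Illustrative sketch (not part of the original module): fit LarsCV on a small
# synthetic problem and read back the alpha selected by cross-validation
# (``alpha_`` minimizes the mean left-out squared error along the path).
# Hedged helper, never called at import time; numpy is imported above as np.
def _lars_cv_usage_sketch():
    rng = np.random.RandomState(42)
    X = rng.randn(60, 8)
    w = np.zeros(8)
    w[0], w[3] = 3., 1.5
    y = np.dot(X, w) + 0.1 * rng.randn(60)
    model = LarsCV().fit(X, y)  # uses the default cross-validation strategy
    return model.alpha_, model.coef_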
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
    explain the data well while remaining simple.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,)
            Target values.
        Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X = check_array(X)
y = np.asarray(y)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
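# Illustrative sketch (not part of the original module): the information
# criterion computed in ``LassoLarsIC.fit`` above, written out for a single
# point of the path. With K = 2 this is the AIC variant and with
# K = log(n_samples) the BIC variant; ``log`` and ``np`` are imported at the
# top of this module.
def _information_criterion_sketch(mean_squared_error, df, n_samples,
                                  criterion='aic'):
    K = 2 if criterion == 'aic' else log(n_samples)
    return n_samples * np.log(mean_squared_error) + K * df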
| bsd-3-clause |
datapythonista/pandas | pandas/core/computation/engines.py | 2 | 3278 | """
Engine classes for :func:`~pandas.eval`
"""
import abc
from typing import (
Dict,
Type,
)
from pandas.core.computation.align import (
align_terms,
reconstruct_object,
)
from pandas.core.computation.expr import Expr
from pandas.core.computation.ops import (
MATHOPS,
REDUCTIONS,
)
import pandas.io.formats.printing as printing
_ne_builtins = frozenset(MATHOPS + REDUCTIONS)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr: Expr) -> None:
"""
Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
expr : Expr
Terms can contain
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ", ".join(repr(x) for x in overlap)
raise NumExprClobberingError(
f'Variables in expression "{expr}" overlap with builtins: ({s})'
)
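# Illustrative sketch (not part of the original module): the clash check above
# is a plain set intersection between the expression's variable names and the
# frozenset of numexpr built-in names. A standalone, hedged illustration of
# that logic (e.g. a user variable named "sin" would be flagged if "sin"
# appears in MATHOPS):
def _clash_overlap_sketch(names):
    return sorted(set(names) & _ne_builtins)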
class AbstractEngine(metaclass=abc.ABCMeta):
"""Object serving as a base class for all engines."""
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self) -> str:
"""
Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self) -> object:
"""
Run the engine on the expression.
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = align_terms(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return reconstruct_object(
self.result_type, res, self.aligned_axes, self.expr.terms.return_type
)
@property
def _is_aligned(self) -> bool:
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""
Return an evaluated expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
env = self.expr.env
scope = env.full_scope
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope)
class PythonEngine(AbstractEngine):
"""
Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def evaluate(self):
return self.expr()
def _evaluate(self) -> None:
pass
ENGINES: Dict[str, Type[AbstractEngine]] = {
"numexpr": NumExprEngine,
"python": PythonEngine,
}
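# Illustrative sketch (not part of the original module): callers normally go
# through :func:`pandas.eval`, which resolves the requested engine name in
# ``ENGINES``, instantiates it with a parsed ``Expr`` and calls ``evaluate``.
# A hedged illustration of that dispatch step; ``expr`` is assumed to be an
# already-constructed ``Expr`` instance.
def _engine_dispatch_sketch(engine_name: str, expr: Expr):
    engine_cls = ENGINES[engine_name]  # raises KeyError for unknown engines
    return engine_cls(expr).evaluate()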
| bsd-3-clause |