repo_name | path | copies | size | content | license
---|---|---|---|---|---|
fxia22/pointGAN | show_gan_rnn.py | 1 | 2043 | from __future__ import print_function
from show3d_balls import *
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointGen, PointGenR
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
gen = PointGenR()
gen.load_state_dict(torch.load(opt.model))
#sim_noise = Variable(torch.randn(5, 2, 20))
#
#sim_noises = Variable(torch.zeros(5, 15, 20))
#
#for i in range(15):
# x = i/15.0
# sim_noises[:,i,:] = sim_noise[:,0,:] * x + sim_noise[:,1,:] * (1-x)
#
#points = gen(sim_noises)
#point_np = points.transpose(2,1).data.numpy()
sim_noise = Variable(torch.randn(5, 6, 20))
sim_noises = Variable(torch.zeros(5, 30 * 5,20))
for j in range(5):
    for i in range(30):
        x = (1 - i/30.0)
        sim_noises[:, i + 30 * j, :] = sim_noise[:, j, :] * x + sim_noise[:, (j+1) % 5, :] * (1 - x)

points = gen(sim_noises)
point_np = points.transpose(2, 1).data.numpy()
print(point_np.shape)

for i in range(150):
    print(i)
    frame = showpoints_frame(point_np[i])
    plt.imshow(frame)
    plt.axis('off')
    plt.savefig('%s/%04d.png' % ('out_rgan', i), bbox_inches='tight')
    plt.clf()
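# Note (added for clarity, not in the original script): the loops above
# linearly interpolate between consecutive latent noise vectors, producing
# 5 * 30 = 150 frames that morph one generated point cloud into the next;
# each frame is rendered and saved as out_rgan/0000.png ... out_rgan/0149.png.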
#showpoints(point_np)
#sim_noise = Variable(torch.randn(5, 1000, 20))
#points = gen(sim_noise)
#point_np = points.transpose(2,1).data.numpy()
#print(point_np.shape)
#choice = np.random.choice(2500, 2048, replace=False)
#print(point_np[:, choice, :].shape)
#showpoints(point_np)
#np.savez('rgan.npz', points = point_np[:, choice, :])
| mit |
shikhardb/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
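# --- Illustrative addition (not part of the original example) ---
# A minimal way to also report the training accuracy of `logreg` on the two
# selected features used above:
print("Training accuracy: %.3f" % logreg.score(X, Y))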
| bsd-3-clause |
CartoDB/crankshaft | release/python/0.8.2/crankshaft/crankshaft/segmentation/segmentation.py | 1 | 8893 | """
Segmentation creation and prediction
"""
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from crankshaft.analysis_data_provider import AnalysisDataProvider
# NOTE: added optional param here
class Segmentation(object):
    """
    Add docstring
    """

    def __init__(self, data_provider=None):
        if data_provider is None:
            self.data_provider = AnalysisDataProvider()
        else:
            self.data_provider = data_provider
    def create_and_predict_segment_agg(self, target, features, target_features,
                                       target_ids, model_parameters):
        """
        Version of create_and_predict_segment that works on arrays that come
        straight from the SQL calling the function.

        Input:
        @param target: The 1D array of length NSamples containing the
            target variable we want the model to predict
        @param features: The 2D array of size NSamples * NFeatures that
            form the input to the model
        @param target_ids: A 1D array of target_ids that will be used
            to associate the results of the prediction with the rows which
            they come from
        @param model_parameters: A dictionary containing parameters for
            the model.
        """
        clean_target, _ = replace_nan_with_mean(target)
        clean_features, _ = replace_nan_with_mean(features)
        target_features, _ = replace_nan_with_mean(target_features)

        model, accuracy = train_model(clean_target, clean_features,
                                      model_parameters, 0.2)
        prediction = model.predict(target_features)
        accuracy_array = [accuracy] * prediction.shape[0]
        return zip(target_ids, prediction, accuracy_array)
    def create_and_predict_segment(self, query, variable, feature_columns,
                                   target_query, model_params,
                                   id_col='cartodb_id'):
        """
        generate a segment with machine learning
        Stuart Lynn
        @param query: subquery that data is pulled from for packaging
        @param variable: name of the target variable
        @param feature_columns: list of column names
        @param target_query: The query to run to obtain the data to predict
        @param model_params: A dictionary of model parameters, the full
            specification can be found on the
            scikit learn page for [GradientBoostingRegressor]
            (http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html)
        """
        params = {"subquery": target_query,
                  "id_col": id_col}

        (target, features, target_mean,
         feature_means) = self.clean_data(query, variable, feature_columns)

        model, accuracy = train_model(target, features, model_params, 0.2)
        result = self.predict_segment(model, feature_columns, target_query,
                                      feature_means)
        accuracy_array = [accuracy] * result.shape[0]

        rowid = self.data_provider.get_segmentation_data(params)
        '''
        rowid = [{'ids': [2.9, 4.9, 4, 5, 6]}]
        '''
        return zip(rowid[0]['ids'], result, accuracy_array)
    def predict_segment(self, model, feature_columns, target_query,
                        feature_means):
        """
        Use the provided model to predict the values for the new feature set
        Input:
        @param model: The pretrained model
        @param feature_columns: A list of features to use in the
            model prediction (list of column names)
        @param target_query: The query to run to obtain the data to predict
            on and the cartodb_ids associated with it.
        """
        batch_size = 1000
        params = {"subquery": target_query,
                  "feature_columns": feature_columns}

        results = []
        cursors = self.data_provider.get_segmentation_predict_data(params)
        '''
        cursors = [{'features': [[m1[0],m2[0],m3[0]],[m1[1],m2[1],m3[1]],
                                 [m1[2],m2[2],m3[2]]]}]
        '''

        while True:
            rows = cursors.fetch(batch_size)
            if not rows:
                break
            batch = np.row_stack([np.array(row['features'])
                                  for row in rows]).astype(float)

            batch = replace_nan_with_mean(batch, feature_means)[0]
            prediction = model.predict(batch)
            results.append(prediction)

        # NOTE: we removed the cartodb_ids calculation in here
        return np.concatenate(results)
    def clean_data(self, query, variable, feature_columns):
        """
        Add docstring
        """
        params = {"subquery": query,
                  "target": variable,
                  "features": feature_columns}

        data = self.data_provider.get_segmentation_model_data(params)
        '''
        data = [{'target': [2.9, 4.9, 4, 5, 6],
                 'feature1': [1,2,3,4], 'feature2' : [2,3,4,5]}]
        '''

        # extract target data from data_provider object
        target = np.array(data[0]['target'], dtype=float)

        # put n feature data arrays into an n x m array of arrays
        features = np.column_stack([np.array(data[0][col])
                                    for col in feature_columns]).astype(float)

        features, feature_means = replace_nan_with_mean(features)
        target, target_mean = replace_nan_with_mean(target)
        return target, features, target_mean, feature_means
def replace_nan_with_mean(array, means=None):
    """
    Input:
    @param array: an array of floats which may have null-valued
        entries
    Output:
    array with nans filled in with the mean of the dataset
    """
    # returns an array of rows and column indices
    nanvals = np.isnan(array)
    indices = np.where(nanvals)

    def loops(array, axis):
        try:
            return np.shape(array)[axis]
        except IndexError:
            return 1
    ran = loops(array, 1)

    if means is None:
        means = {}

        if ran == 1:
            array = np.array(array)
            means[0] = np.mean(array[~np.isnan(array)])
            for row in zip(*indices):
                array[row] = means[0]
        else:
            for col in range(ran):
                means[col] = np.mean(array[~np.isnan(array[:, col]), col])
            for row, col in zip(*indices):
                array[row, col] = means[col]
    else:
        if ran == 1:
            for row in zip(*indices):
                array[row] = means[0]
        else:
            for row, col in zip(*indices):
                array[row, col] = means[col]

    return array, means
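# --- Illustrative usage sketch (not part of the original module) ---
# NaNs are imputed with per-column means, and the returned `means` dict can
# be reused to impute another array with the same columns:
#   filled, col_means = replace_nan_with_mean(np.array([[1., np.nan], [3., 4.]]))
#   # filled[0, 1] == 4.0 and col_means == {0: 2.0, 1: 4.0}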
def train_model(target, features, model_params, test_split):
    """
    Train the Gradient Boosting model on the provided data to calculate
    the accuracy of the model
    Input:
    @param target: 1D Array of the variable that the model is to be
        trained to predict
    @param features: 2D Array NSamples * NFeatures to use in training
        the model
    @param model_params: A dictionary of model parameters, the full
        specification can be found on the
        scikit learn page for [GradientBoostingRegressor]
        (http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html)
    @param test_split: The fraction of the data to be withheld for
        testing the model / calculating the accuracy
    """
    features_train, features_test, \
        target_train, target_test = train_test_split(features, target,
                                                     test_size=test_split)
    model = GradientBoostingRegressor(**model_params)
    model.fit(features_train, target_train)
    accuracy = calculate_model_accuracy(model, features_test, target_test)
    return model, accuracy
def calculate_model_accuracy(model, features_test, target_test):
    """
    Calculate the mean squared error of the model prediction
    Input:
    @param model: model trained from input features
    @param features_test: test features set to make prediction from
    @param target_test: test target set to compare predictions to
    Output:
    mean squared error of the model prediction compared to target_test
    """
    prediction = model.predict(features_test)
    return metrics.mean_squared_error(prediction, target_test)
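# --- Illustrative usage sketch (not part of the original module) ---
# `model_params` is forwarded to sklearn's GradientBoostingRegressor, so any
# of its keyword arguments may be used; the values below are arbitrary
# examples, not crankshaft defaults.
#   example_params = {'n_estimators': 100, 'max_depth': 3, 'learning_rate': 0.1}
#   model, accuracy = train_model(target_array, feature_matrix, example_params, 0.2)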
| bsd-3-clause |
claesenm/HPOlib | HPOlib/Plotting/plotTrace_perExp.py | 5 | 6055 | #!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
from matplotlib.pyplot import tight_layout, figure, subplots_adjust, subplot, savefig, show
import matplotlib.gridspec
import numpy as np
from HPOlib.Plotting import plot_util
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_optimization_trace_cv(trial_list, name_list, optimum=0, title="",
                               log=True, save="", y_max=0, y_min=0):
    markers = plot_util.get_plot_markers()
    colors = plot_util.get_plot_colors()
    linestyles = itertools.cycle(['-'])
    size = 1

    ratio = 5
    gs = matplotlib.gridspec.GridSpec(ratio, 1)
    fig = figure(1, dpi=100)
    fig.suptitle(title, fontsize=16)
    ax1 = subplot(gs[0:ratio, :])
    ax1.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
    min_val = sys.maxint
    max_val = -sys.maxint
    max_trials = 0

    fig.suptitle(title, fontsize=16)

    # Plot the average error and std
    for i in range(len(name_list)):
        m = markers.next()
        c = colors.next()
        l = linestyles.next()
        leg = False
        for tr in trial_list[i]:
            if log:
                tr = np.log10(tr)
            x = range(1, len(tr) + 1)
            y = tr
            if not leg:
                ax1.plot(x, y, color=c, linewidth=size, linestyle=l, label=name_list[i][0])
                leg = True
            ax1.plot(x, y, color=c, linewidth=size, linestyle=l)
            min_val = min(min_val, min(tr))
            max_val = max(max_val, max(tr))
            max_trials = max(max_trials, len(tr))

    # Maybe plot on logscale
    ylabel = ""
    if log:
        ax1.set_ylabel("log10(Minfunction value)" + ylabel)
    else:
        ax1.set_ylabel("Minfunction value" + ylabel)

    # Describe and label the stuff
    leg = ax1.legend(loc='best', fancybox=True)
    leg.get_frame().set_alpha(0.5)
    ax1.set_xlabel("#Function evaluations")

    if y_max == y_min:
        # Set axes limits
        ax1.set_ylim([min_val - 0.1*abs((max_val - min_val)), max_val + 0.1*abs((max_val - min_val))])
    else:
        ax1.set_ylim([y_min, y_max])
    ax1.set_xlim([0, max_trials + 1])

    tight_layout()
    subplots_adjust(top=0.85)
    if save != "":
        savefig(save, dpi=100, facecolor='w', edgecolor='w',
                orientation='portrait', papertype=None, format=None,
                transparent=False, bbox_inches="tight", pad_inches=0.1)
    else:
        show()
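# --- Illustrative usage sketch (not part of the original module) ---
# trial_list holds, per optimizer, a list of per-run traces; name_list holds
# matching entries whose first element is used as the legend label:
#   plot_optimization_trace_cv([[np.array([1.0, 0.5, 0.4])]], [["smac"]],
#                              log=False, save="trace.png")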
def main(pkl_list, name_list, autofill, optimum=0, save="", title="", log=False,
         y_min=0, y_max=0):
    trial_list = list()
    for i in range(len(pkl_list)):
        tmp_trial_list = list()
        max_len = -sys.maxint
        for pkl in pkl_list[i]:
            fh = open(pkl, "r")
            trials = cPickle.load(fh)
            fh.close()

            trace = plot_util.get_Trace_cv(trials)
            tmp_trial_list.append(trace)
            max_len = max(max_len, len(trace))
        trial_list.append(list())
        for tr in tmp_trial_list:
            # if len(tr) < max_len:
            #     tr.extend([tr[-1] for idx in range(abs(max_len - len(tr)))])
            trial_list[-1].append(np.array(tr))

    plot_optimization_trace_cv(trial_list, name_list, optimum, title=title, log=log,
                               save=save, y_min=y_min, y_max=y_max)

    if save != "":
        sys.stdout.write("Saved plot to " + save + "\n")
    else:
        sys.stdout.write("..Done\n")
if __name__ == "__main__":
    prog = "python plotTraceWithStd.py WhatIsThis <oneOrMorePickles> [WhatIsThis <oneOrMorePickles>]"
    description = "Plot a Trace with std for multiple experiments"

    parser = ArgumentParser(description=description, prog=prog)

    # Options for specific benchmarks
    parser.add_argument("-o", "--optimum", type=float, dest="optimum",
                        default=0, help="If not set, the optimum is supposed to be zero")

    # Options which are available only for this plot
    parser.add_argument("-a", "--autofill", action="store_true", dest="autofill",
                        default=False, help="Fill trace automatically")

    # General Options
    parser.add_argument("-l", "--log", action="store_true", dest="log",
                        default=False, help="Plot on log scale")
    parser.add_argument("--max", dest="max", type=float,
                        default=0, help="Maximum of the plot")
    parser.add_argument("--min", dest="min", type=float,
                        default=0, help="Minimum of the plot")
    parser.add_argument("-s", "--save", dest="save",
                        default="", help="Where to save plot instead of showing it?")
    parser.add_argument("-t", "--title", dest="title",
                        default="", help="Optional supertitle for plot")

    args, unknown = parser.parse_known_args()

    sys.stdout.write("\nFound " + str(len(unknown)) + " arguments\n")
    pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown)
    main(pkl_list=pkl_list_main, name_list=name_list_main, autofill=args.autofill, optimum=args.optimum,
         save=args.save, title=args.title, log=args.log, y_min=args.min, y_max=args.max)
| gpl-3.0 |
GarrettSmith/Nearness | graphchi/conf/adminhtml/plots/plotter.py | 3 | 1235 | #!/usr/bin/python
import sys
import os
import matplotlib
import numpy
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
def getArg(param, default=""):
    if (sys.argv.count(param) == 0): return default
    i = sys.argv.index(param)
    return sys.argv[i + 1]

lastsecs = int(getArg("lastsecs", 240))
fname = sys.argv[1]

try:
    tdata = numpy.loadtxt(fname, delimiter=" ")
except:
    exit(0)

if len(tdata.shape) < 2 or tdata.shape[0] < 2 or tdata.shape[1] < 2:
    print "Too small data - do not try to plot yet."
    exit(0)
times = tdata[:, 0]
values = tdata[:, 1]
lastt = max(times)
#majorFormatter = FormatStrFormatter('%.2f')
fig = plt.figure(figsize=(3.5, 2.0))
plt.plot(times[times > lastt - lastsecs], values[times > lastt - lastsecs])
plt.gca().xaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
plt.xlim([max(0, lastt - lastsecs), lastt])
#plt.ylim([lastt - lastsecs, lastt])
plt.gca().yaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
#plt.gca().yaxis.set_major_formatter(majorFormatter)
plt.savefig(fname.replace(".dat", ".png"), format="png", bbox_inches='tight')
| gpl-3.0 |
sukritranjan/ranjansasselov2016b | compute_UV_doses.py | 1 | 29816 | # -*- coding: iso-8859-1 -*-
"""
This code is used to weigh the UV radiances we compute by biological action spectra.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
from matplotlib.pyplot import cm
from scipy import interpolate as interp
import scipy.integrate
########################
###Set physical constants
########################
hc=1.98645e-9 #value of h*c in erg*nm
def cm2inch(cm): #function to convert cm to inches; useful for complying with Astrobiology size guidelines
return cm/2.54
########################
###Decide which bits of the calculation will be run
########################
plotactionspec=False #if true, plots the action spectra we are using.
plotactionspec_talk=False #if true, plots the action spectra we are using...but, optimized for a talk instead of a paper
calculatealbaz=False #if true, generates the table for the albedo and zenith angle study
calculateco2=False #if true, generates the table for the co2 study
calculatealtgas=True #if true, generates the table for the alternate gas study
########################
###Helper functions: I/O
########################
def get_UV(filename):
    """
    Input: filename (including path)
    Output: (wave_leftedges, wav_rightedges, surface radiance) in units of (nm, nm, photons/cm2/sec/nm)
    """
    wav_leftedges, wav_rightedges, wav, toa_intensity, surface_flux, surface_intensity, surface_intensity_diffuse, surface_intensity_direct=np.genfromtxt(filename, skip_header=1, skip_footer=0, usecols=(0, 1, 2,3,4,6,7,8), unpack=True)
    surface_intensity_photons=surface_intensity*(wav/(hc))
    return wav_leftedges, wav_rightedges, surface_intensity_photons
########################
###Helper functions: UV Dosimeters
########################
def integrated_radiance(wav_left, wav_right, surf_int, leftlim, rightlim):
    """
    Computes the surface radiance integrated from leftlim to rightlim. Does this by doing a trapezoid sum. NOTE: The method I have chosen works only so long as the limits line up with the bin edges!
    wav_left: left edge of wavelength bin, in nm
    wav_right: right edge of wavelength bin, in nm
    surf_int: total surface intensity (radiance, hemispherically-integrated) in photons/cm2/s/nm, in bin defined by wav_left and wav_right
    leftlim: left limit of integration, in nm
    rightlim: right limit of integration, in nm
    """
    allowed_inds=np.where((wav_left>=leftlim) & (wav_right<=rightlim))
    delta_wav=wav_right[allowed_inds]-wav_left[allowed_inds]
    surf_int_integrated=np.sum(surf_int[allowed_inds]*delta_wav) #integration converts from photons/cm2/s/nm to photons/cm2/s
    return surf_int_integrated
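# --- Illustrative usage sketch (not part of the original script) ---
# Assuming two 1-nm bins spanning 200-202 nm, the integration limits must
# line up with the bin edges, e.g.:
#   wl = np.array([200., 201.]); wr = np.array([201., 202.])
#   flux = np.array([1.e13, 2.e13])                 # photons/cm2/s/nm
#   integrated_radiance(wl, wr, flux, 200., 202.)   # -> 3.e13 photons/cm2/s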
def tricyano_aqe_prodrate(wav_left, wav_right, surf_int, lambda0, produceplots, returnxy):
"""
Weights the input surface intensities by the action spectrum for the photoproduction of aquated electrons from Ritson+2012 and Patel+2015, i.e. irradiation of tricyano cuprate. The action spectrum is composed of the absorption spectrum multiplied by an assumed quantum yield function. We assume the QY function to be a step function, stepping from 0 at wavelengths longer than lambda0 to 0.06 at wavelengths shorter than lambda0. We choose 0.06 for the step function to match the estimate found by Horvath+1984; we note this value may be pH sensitive. Empirically, we know that lambda0>254 nm, but that's about it.
This process is an eustressor for abiogenesis.
wav_left: left edge of wavelength bin, in nm
wav_right: right edge of wavelength bin, in nm
surf_int: total surface intensity (radiance, hemispherically-integrated) in photons/cm2/s/nm, in bin defined by wav_left and wav_right
lambda0: value assume for lambda0.
produceplots: if True, shows plots of what it is computing
returnxy: if True, returns x,y for action spectrum.
"""
####Step 1: reduce input spectrum to match bounds of available dataset.
int_min=190.0 #This lower limit of integration is set by the limits of the cucn3 absorption dataset (left edge of bin)
int_max=351.0 #This upper limit of integration is set by the limits of the cucn3 absorption dataset (right edge of bin)
allowed_inds=np.where((wav_left>=int_min) & (wav_right<=int_max)) #indices that correspond to included data
wav_left=wav_left[allowed_inds]
wav_right=wav_right[allowed_inds]
surf_int=surf_int[allowed_inds]
delta_wav=wav_right-wav_left #size of wavelength bins in nm
####Step 2: form the action spectrum from the absorption spectrum and QY curve.
#Import the tricyanocuprate absorption spectrum
importeddata=np.genfromtxt('./Raw_Data/Magnani_Data/CuCN3_XC.dat', skip_header=2)
cucn3_wav=importeddata[:,0] #wav in nm
cucn3_molabs=importeddata[:,1] #molar absorptivities in L/(mol*cm), decadic
cucn3_molabs_func=interp.interp1d(cucn3_wav, cucn3_molabs, kind='linear') #functionalized form of cucn3 molar absorption
#does not matter if you use decadic or natural logarithmic as constant factors normalize out anyway
#Formulate the step-function quantum yield curve
def qy_stepfunc(wav, lambda0): #step function, for the photoionization model
"""Returns 1 for wav<=lambda0 and 0 for wav>lambda0"""
qy=np.zeros(np.size(wav))# initialize all to zero
inds=np.where(wav<=lambda0) #indices where the wavelength is below the threshold
qy[inds]=qy[inds]+0.06 #increase the QE to 1 at the indices where the wavelength is below the threshold
return qy
#Integrate these quantities to match the input spectral resolution
qy_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the QY integrated over the surface intensity wavelength bins
cucn3_molabs_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the QY integrated over the surface intensity wavelength bins
for ind in range(0, len(wav_left)):
leftedge=wav_left[ind]
rightedge=wav_right[ind]
cucn3_molabs_dist[ind]=scipy.integrate.quad(cucn3_molabs_func, leftedge, rightedge, epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
qy_dist[ind]=scipy.integrate.quad(qy_stepfunc, leftedge, rightedge, args=(lambda0), epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
action_spectrum=cucn3_molabs_dist*qy_dist
#Normalize action spectrum to 1 at 195 (arbitrary)
action_spectrum=action_spectrum*(1./(np.interp(190., 0.5*(wav_left+wav_right), action_spectrum)))
####Step 3: Compute action-spectrum weighted total intensity
weighted_surface_intensity=surf_int*action_spectrum
total_weighted_radiance=np.sum(weighted_surface_intensity*delta_wav) #units: photons/cm2/s
####Step 4 (Optional): Plot various components of action spectrum to show the multiplication
if produceplots:
legendfontsize=12
axisfontsize=12
##Plot ribonucleotide absorption and interpolation
fig1, axarr=plt.subplots(3,2,sharex=True, figsize=(8., 10.5)) #specify figure size (width, height) in inches
axarr[0,0].bar(wav_left, surf_int,width=delta_wav, color='black', alpha=0.5, log=True)
axarr[0,0].set_ylim([1e10,1e16])
axarr[0,0].legend(loc=2, prop={'size':legendfontsize})
axarr[0,0].yaxis.grid(True)
axarr[0,0].xaxis.grid(True)
axarr[0,0].set_ylabel('Surface Radiance \n(photons cm$^{-2}$s$^{-1}$nm$^{-1}$)', fontsize=axisfontsize)
#axarr[0,0].title.set_position([0.5, 1.11])
#axarr[0,0].text(0.5, 1.1, r'a(i)', transform=axarr[0].transAxes, va='top')
axarr[1,0].bar(wav_left, cucn3_molabs_dist,width=delta_wav, color='black', alpha=0.5, log=True)
#axarr[1,0].set_ylim([-0.1, 1.1])
axarr[1,0].legend(loc=6, prop={'size':legendfontsize})
axarr[1,0].yaxis.grid(True)
axarr[1,0].xaxis.grid(True)
axarr[1,0].set_ylabel('CuCN3 Molar Absorptivity\n(M$^{-1}$cm$^{-1}$)', fontsize=axisfontsize)
#axarr[1,0].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[2,0].bar(wav_left, qy_dist,width=delta_wav, color='black', alpha=0.5)
axarr[2,0].set_ylim([-0.01, 0.06])
axarr[2,0].legend(loc=6, prop={'size':legendfontsize})
axarr[2,0].yaxis.grid(True)
axarr[2,0].xaxis.grid(True)
axarr[2,0].set_ylabel('Quantum Efficiency \n(reductions absorption$^{-1}$)', fontsize=axisfontsize)
#axarr[2,0].text(0.5, 1.10, r'c(i)', fontsize=12,transform=axarr[2].transAxes, va='top')
axarr[0,1].bar(wav_left, action_spectrum,width=delta_wav, color='black', alpha=0.5)
#axarr[0,1].set_ylim([-0.1, 1.1])
axarr[0,1].legend(loc=6, prop={'size':legendfontsize})
axarr[0,1].yaxis.grid(True)
axarr[0,1].xaxis.grid(True)
axarr[0,1].set_ylabel('Action Spectrum', fontsize=axisfontsize)
#axarr[0,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[1,1].bar(wav_left, weighted_surface_intensity,width=delta_wav, color='black', alpha=0.5)
#axarr[1,1].set_ylim([-0.1, 1.1])
axarr[1,1].legend(loc=6, prop={'size':legendfontsize})
axarr[1,1].yaxis.grid(True)
axarr[1,1].xaxis.grid(True)
axarr[1,1].set_ylabel('Weighted Surface Radiance', fontsize=axisfontsize)
#axarr[1,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
#plt.savefig('/home/sranjan/Python/UV/Plots/ritson_assumed_qe_v3.pdf', orientation='portrait',papertype='letter', format='pdf')
plt.show()
if returnxy:
return 0.5*(wav_left+wav_right), action_spectrum
else:
return total_weighted_radiance
def ump_glycosidic_photol(wav_left, wav_right, surf_int, lambda0, produceplots, returnxy):
"""
Weights the input surface intensities by the action spectrum for cleavage of the glycosidic bond in UMP (the U-RNA monomer), aka base release. We form this spectrum by convolving the pH=7.6 absorption spectrum for Uridine-3'-(2')-phosporic acid (i.e. uridylic acid, UMP) from Voet et al (1963) with an assumed QY curve. The QY curve is based on the work of Gurzadyan and Gorner (1994); they measure (wavelength, QY) for N-glycosidic bond cleavage in UMP in anoxic aqueous solution (Ar-suffused) to be (193 nm, 4.3e-3) and (254 nm, (2-3)e-5). Specifically, we assume that QY=4.3e-3 for lambda<=lambda_0 and QY=2.5e-5 for lambda>lambda_0. natural choices of lambda_0 are 194, 254, and 230 (first two: empirical limits. Last: end of pi-pi* absorption bad, Sinsheimer+1949 suggest it is onset of irreversible photolytic damage).
This process is a stressor for abiogenesis.
wav_left: left edge of wavelength bin, in nm
wav_right: right edge of wavelength bin, in nm
surf_int: total surface intensity (radiance, hemispherically-integrated) in photons/cm2/s/nm, in bin defined by wav_left and wav_right
lambda0: value assume for lambda0.
produceplots: if True, shows plots of what it is computing
returnxy: if True, returns x,y for action spectrum.
"""
####Step 1: reduce input spectrum to match bounds of available dataset (absorption).
int_min=184.0 #This lower limit of integration is set by the limits of the cucn3 absorption dataset (left edge of bin)
int_max=299.0 #This upper limit of integration is set by the limits of the cucn3 absorption dataset (right edge of bin)
allowed_inds=np.where((wav_left>=int_min) & (wav_right<=int_max)) #indices that correspond to included data
wav_left=wav_left[allowed_inds]
wav_right=wav_right[allowed_inds]
surf_int=surf_int[allowed_inds]
delta_wav=wav_right-wav_left #size of wavelength bins in nm
####Step 2: form the action spectrum from the absorption spectrum and QY curve.
#Import the UMP absorption spectrum from Voet et al 1963
importeddata=np.genfromtxt('./Raw_Data/Voet_Data/ribouridine_pH_7.3_v2.txt', skip_header=0, delimiter=',')
ump_wav=importeddata[:,0] #wav in nm
ump_molabs=importeddata[:,1] #molar absorptivities\times 10^{3}, i.e. in units of 10^{-3} L/(mol*cm), decadic (I think -- unit scheme unclear in paper. Not important since normalized out)
ump_molabs_func=interp.interp1d(ump_wav, ump_molabs, kind='linear') #functionalized form of molar absorption
#does not matter if you use decadic or natural logarithmic as constant factors normalize out anyway
#Formulate the step-function quantum yield curve
def qy_stepfunc(wav, lambda0): #step function, for the photoionization model
"""QY based on work of Gurzadyan and Gorner 1994"""
qy=np.zeros(np.size(wav))# initialize all to zero
inds1=np.where(wav<=lambda0) #indices where the wavelength is below the threshold
inds2=np.where(wav>lambda0) #indices where the wavelength is below the threshold
qy[inds1]=qy[inds1]+4.3e-3 #High QY for lambda<=lambda0
qy[inds2]=qy[inds2]+2.5e-5 #Low QY for lambda>lambda0
return qy
#Integrate these quantities to match the input spectral resolution
qy_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the QY integrated over the surface intensity wavelength bins
ump_molabs_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the UMP absorption integrated over the surface intensity wavelength bins
for ind in range(0, len(wav_left)):
leftedge=wav_left[ind]
rightedge=wav_right[ind]
ump_molabs_dist[ind]=scipy.integrate.quad(ump_molabs_func, leftedge, rightedge, epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
qy_dist[ind]=scipy.integrate.quad(qy_stepfunc, leftedge, rightedge, args=(lambda0),epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
action_spectrum=ump_molabs_dist*qy_dist
#Normalize action spectrum to 1 at 195 (arbitrary)
action_spectrum=action_spectrum*(1./(np.interp(190., 0.5*(wav_left+wav_right), action_spectrum)))
####Step 3: Compute action-spectrum weighted total intensity
weighted_surface_intensity=surf_int*action_spectrum
total_weighted_radiance=np.sum(weighted_surface_intensity*delta_wav) #units: photons/cm2/s
####Step 4 (Optional): Plot various components of action spectrum to show the multiplication
if produceplots:
legendfontsize=12
axisfontsize=12
##Plot ribonucleotide absorption and interpolation
fig1, axarr=plt.subplots(3,2,sharex=True, figsize=(8., 10.5)) #specify figure size (width, height) in inches
axarr[0,0].bar(wav_left, surf_int,width=delta_wav, color='black', alpha=0.5, log=True)
axarr[0,0].set_ylim([1e10,1e16])
axarr[0,0].legend(loc=2, prop={'size':legendfontsize})
axarr[0,0].yaxis.grid(True)
axarr[0,0].xaxis.grid(True)
axarr[0,0].set_ylabel('Surface Radiance \n(photons cm$^{-2}$s$^{-1}$nm$^{-1}$)', fontsize=axisfontsize)
#axarr[0,0].title.set_position([0.5, 1.11])
#axarr[0,0].text(0.5, 1.1, r'a(i)', transform=axarr[0].transAxes, va='top')
axarr[1,0].bar(wav_left, ump_molabs_dist,width=delta_wav, color='black', alpha=0.5, log=False)
#axarr[1,0].set_ylim([-0.1, 1.1])
axarr[1,0].legend(loc=6, prop={'size':legendfontsize})
axarr[1,0].yaxis.grid(True)
axarr[1,0].xaxis.grid(True)
axarr[1,0].set_ylabel('UMP Molar Absorptivity\n(M$^{-1}$cm$^{-1}$)', fontsize=axisfontsize)
#axarr[1,0].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[2,0].bar(wav_left, qy_dist,width=delta_wav, color='black', alpha=0.5, log=True)
axarr[2,0].set_ylim([1e-5, 1e-2])
axarr[2,0].legend(loc=6, prop={'size':legendfontsize})
axarr[2,0].yaxis.grid(True)
axarr[2,0].xaxis.grid(True)
axarr[2,0].set_ylabel('Quantum Efficiency \n(reductions absorption$^{-1}$)', fontsize=axisfontsize)
#axarr[2,0].text(0.5, 1.10, r'c(i)', fontsize=12,transform=axarr[2].transAxes, va='top')
axarr[0,1].bar(wav_left, action_spectrum,width=delta_wav, color='black', alpha=0.5)
#axarr[0,1].set_ylim([-0.1, 1.1])
axarr[0,1].legend(loc=6, prop={'size':legendfontsize})
axarr[0,1].yaxis.grid(True)
axarr[0,1].xaxis.grid(True)
axarr[0,1].set_ylabel('Action Spectrum', fontsize=axisfontsize)
#axarr[0,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[1,1].bar(wav_left, weighted_surface_intensity,width=delta_wav, color='black', alpha=0.5)
#axarr[1,1].set_ylim([-0.1, 1.1])
axarr[1,1].legend(loc=6, prop={'size':legendfontsize})
axarr[1,1].yaxis.grid(True)
axarr[1,1].xaxis.grid(True)
axarr[1,1].set_ylabel('Weighted Surface Radiance', fontsize=axisfontsize)
#axarr[1,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
#plt.savefig('/home/sranjan/Python/UV/Plots/ritson_assumed_qe_v3.pdf', orientation='portrait',papertype='letter', format='pdf')
plt.show()
if returnxy:
return 0.5*(wav_left+wav_right), action_spectrum
else:
return total_weighted_radiance
########################
###Plot UV Dosimeters
########################
if plotactionspec:
#Set up wavelength scale
wave_left=np.arange(100., 500.)
wave_right=np.arange(101., 501.)
wave_centers=0.5*(wave_left+wave_right)
surf_int=np.ones(np.shape(wave_centers)) #for our purposes here, this is a thunk.
#Extract action spectra
wav_gly_193, actspec_gly_193=ump_glycosidic_photol(wave_left, wave_right, surf_int, 193., False, True)
wav_gly_230, actspec_gly_230=ump_glycosidic_photol(wave_left, wave_right, surf_int, 230., False, True)
wav_gly_254, actspec_gly_254=ump_glycosidic_photol(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_254, actspec_aqe_254=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_300, actspec_aqe_300=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 300., False, True)
#####Plot action spectra
#Initialize Figure
fig, (ax1)=plt.subplots(1, figsize=(cm2inch(16.5),6), sharex=True)
colorseq=iter(cm.rainbow(np.linspace(0,1,5)))
#Plot Data
ax1.plot(wav_gly_193,actspec_gly_193, linestyle='-',linewidth=2, color=next(colorseq), label=r'UMP Gly Bond Cleavage ($\lambda_0=193$)')
ax1.plot(wav_gly_230,actspec_gly_230, linestyle='-',linewidth=2, color=next(colorseq), label=r'UMP Gly Bond Cleavage ($\lambda_0=230$)')
ax1.plot(wav_gly_254,actspec_gly_254, linestyle='-',linewidth=2, color=next(colorseq), label=r'UMP Gly Bond Cleavage ($\lambda_0=254$)')
ax1.plot(wav_aqe_254,actspec_aqe_254, linestyle='-',linewidth=2, color=next(colorseq), label=r'CuCN$_{3}$$^{2-}$ Photoionization ($\lambda_0=254$)')
ax1.plot(wav_aqe_300,actspec_aqe_300, linestyle='--',linewidth=2, color=next(colorseq), label=r'CuCN$_{3}$$^{2-}$ Photoionization ($\lambda_0=300$)')
#####Finalize and save figure
ax1.set_title(r'Action Spectra')
ax1.set_xlim([180.,360.])
ax1.set_xlabel('nm')
ax1.set_ylabel(r'Relative Sensitivity')
ax1.set_yscale('log')
ax1.set_ylim([1e-6, 1e2])
#ax1.legend(bbox_to_anchor=[0, 1.1, 1,1], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
ax1.legend(loc='upper right', ncol=1, fontsize=10)
plt.tight_layout(rect=(0,0,1,1))
plt.savefig('./Plots/actionspectra.eps', orientation='portrait',papertype='letter', format='eps')
if plotactionspec_talk:
#Set up wavelength scale
wave_left=np.arange(100., 500.)
wave_right=np.arange(101., 501.)
wave_centers=0.5*(wave_left+wave_right)
surf_int=np.ones(np.shape(wave_centers)) #for our purposes here, this is a thunk.
#Extract action spectra
wav_gly_193, actspec_gly_193=ump_glycosidic_photol(wave_left, wave_right, surf_int, 193., False, True)
wav_gly_230, actspec_gly_230=ump_glycosidic_photol(wave_left, wave_right, surf_int, 230., False, True)
wav_gly_254, actspec_gly_254=ump_glycosidic_photol(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_254, actspec_aqe_254=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_300, actspec_aqe_300=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 300., False, True)
#####Plot action spectra
#Initialize Figure
fig, (ax1)=plt.subplots(1, figsize=(10,9), sharex=True)
colorseq=iter(cm.rainbow(np.linspace(0,1,5)))
#Plot Data
ax1.plot(wav_gly_193,actspec_gly_193, linestyle='-',linewidth=3, color=next(colorseq), label=r'UMP-193')
ax1.plot(wav_gly_230,actspec_gly_230, linestyle='-',linewidth=3, color=next(colorseq), label=r'UMP-230')
ax1.plot(wav_gly_254,actspec_gly_254, linestyle='-',linewidth=3, color=next(colorseq), label=r'UMP-254')
ax1.plot(wav_aqe_254,actspec_aqe_254, linestyle='-',linewidth=3, color=next(colorseq), label=r'CuCN3-254')
ax1.plot(wav_aqe_300,actspec_aqe_300, linestyle='--',linewidth=3, color=next(colorseq), label=r'CuCN3-300')
#####Finalize and save figure
ax1.set_title(r'Action Spectra', fontsize=24)
ax1.set_xlim([180.,360.])
ax1.set_xlabel('nm',fontsize=24)
ax1.set_ylabel(r'Relative Sensitivity', fontsize=24)
ax1.set_yscale('log')
ax1.set_ylim([1e-6, 1e2])
ax1.legend(bbox_to_anchor=[0, 1.1, 1,0.5], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=24)
#ax1.legend(loc='upper right', ncol=1, fontsize=16)
ax1.xaxis.set_tick_params(labelsize=24)
ax1.yaxis.set_tick_params(labelsize=24)
plt.tight_layout(rect=(0,0,1,0.75))
plt.savefig('./TalkFigs/actionspectra.pdf', orientation='portrait',papertype='letter', format='pdf')
########################
###Set "base" values to normalize the alb-zen, co2, and alt-gas dosimeters by
########################
#Use the TOA flux in order to get a good, physically understandable denominator.
wav_leftedges, wav_rightedges, wav, toa_intensity=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a=0.2_z=60.dat', skip_header=1, skip_footer=0, usecols=(0, 1,2, 3), unpack=True)
toa_intensity_photons=toa_intensity*(wav/(hc))
#Compute base doses
intrad100_165_base=integrated_radiance(wav_leftedges, wav_rightedges, toa_intensity_photons, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300_base=integrated_radiance(wav_leftedges, wav_rightedges, toa_intensity_photons, 200., 300.) #This is just an empirical gauge.
umpgly_193_base=ump_glycosidic_photol(wav_leftedges, wav_rightedges, toa_intensity_photons, 193., False, False)
umpgly_230_base=ump_glycosidic_photol(wav_leftedges, wav_rightedges, toa_intensity_photons,230., False, False)
umpgly_254_base=ump_glycosidic_photol(wav_leftedges, wav_rightedges, toa_intensity_photons, 254., False, False)
tricyano254_base=tricyano_aqe_prodrate(wav_leftedges, wav_rightedges, toa_intensity_photons, 254., False, False)
tricyano300_base=tricyano_aqe_prodrate(wav_leftedges, wav_rightedges, toa_intensity_photons, 300., False, False)
########################
###Run code for albedo, zenith angle
########################
if calculatealbaz:
#Evaluate only two zenith angles (to show range of variation)
zenithangles=['66.5', '0']
albedos=['tundra', 'ocean', 'desert', 'oldsnow', 'newsnow']
for zenind in range(0, len(zenithangles)):
zenithangle=zenithangles[zenind]
for albind in range(0, len(albedos)):
albedo=albedos[albind]
datafile='./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a='+albedo+'_z='+zenithangle+'.dat'
left, right, surface_int=get_UV(datafile)
intrad100_165=integrated_radiance(left, right, surface_int, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300=integrated_radiance(left, right, surface_int, 200., 300.) #This is just an empirical gauge.
umpgly_193=ump_glycosidic_photol(left, right, surface_int, 193., False, False)
umpgly_230=ump_glycosidic_photol(left, right, surface_int,230., False, False)
umpgly_254=ump_glycosidic_photol(left, right, surface_int, 254., False, False)
tricyano254=tricyano_aqe_prodrate(left, right, surface_int, 254., False, False)
tricyano300=tricyano_aqe_prodrate(left, right, surface_int, 300., False, False)
line=np.array([zenithangle, albedo, intrad100_165/intrad100_165_base,intrad200_300/intrad200_300_base, umpgly_193/umpgly_193_base, umpgly_230/umpgly_230_base, umpgly_254/umpgly_254_base, tricyano254/tricyano254_base, tricyano300/tricyano300_base])
if (albind==0 and zenind==0):
albzentable=line #need to initialize in this case
else:
albzentable=np.vstack((albzentable, line))
#Save output
f=open('./Doses/albzen_uv_doses.dat','w')
f.write('All Dosimeters Normalized to Space Radiation Case\n')
np.savetxt(f, albzentable, delimiter=' ', fmt='%s', newline='\n', header='Zenith Angle & Albedo & Radiance (100-165 nm) & Radiance (200-300 nm) & UMP Gly Cleavage (lambda0=193nm) & UMP Gly Cleavage (lambda0=230nm) & UMP Gly Cleavage (lambda0=254nm) & CuCN3 Photoionization (lambda0=254 nm) & CuCN3 Photoionization (lambda0=300 nm)\n')
f.close()
########################
###Run code for varying CO2 levels
########################
if calculateco2:
N_co2_rugh=2.09e24 #column density of CO2 in Rugheimer base model (cm**-2)
co2multiples=np.array([0., 1.e-6,1.e-5, 1.e-4, 1.e-3, 0.00893, 1.e-2, 1.e-1, 0.6, 1., 1.33, 1.e1, 46.6, 1.e2, 470., 1.e3])
zenithangles=['0', '66.5']
albedos=['newsnow', 'tundra']
for surfind in range(0, len(zenithangles)):
albedo=albedos[surfind]
zenithangle=zenithangles[surfind]
for multind in range(0, len(co2multiples)):
multiple=co2multiples[multind]
colden_co2=N_co2_rugh*multiple
datafile='./TwoStreamOutput/CO2lim/surface_intensities_co2limits_co2multiple='+str(multiple)+'_a='+albedo+'_z='+zenithangle+'.dat'
left, right, surface_int=get_UV(datafile)
intrad100_165=integrated_radiance(left, right, surface_int, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300=integrated_radiance(left, right, surface_int, 200., 300.) #This is just an empirical gauge.
umpgly_193=ump_glycosidic_photol(left, right, surface_int, 193., False, False)
umpgly_230=ump_glycosidic_photol(left, right, surface_int,230., False, False)
umpgly_254=ump_glycosidic_photol(left, right, surface_int, 254., False, False)
tricyano254=tricyano_aqe_prodrate(left, right, surface_int, 254., False, False)
tricyano300=tricyano_aqe_prodrate(left, right, surface_int, 300., False, False)
#print intrad200_300
#pdb.set_trace()
line=np.array([zenithangle, albedo, colden_co2, intrad100_165/intrad100_165_base,intrad200_300/intrad200_300_base, umpgly_193/umpgly_193_base, umpgly_230/umpgly_230_base, umpgly_254/umpgly_254_base, tricyano254/tricyano254_base, tricyano300/tricyano300_base])
if (multind==0 and surfind==0):
co2table=line #need to initialize in this case
else:
co2table=np.vstack((co2table, line))
#Save Output
f=open('./Doses/co2_uv_doses.dat','w')
f.write('All Dosimeters Normalized to Space Radiation Case\n')
np.savetxt(f, co2table, delimiter=' ', fmt='%s', newline='\n', header='Zenith Angle & Albedo & Radiance (100-165 nm) & Radiance (200-300 nm) & UMP Gly Cleavage (lambda0=193nm) & UMP Gly Cleavage (lambda0=230nm) & UMP Gly Cleavage (lambda0=254nm) & CuCN3 Photoionization (lambda0=254 nm) & CuCN3 Photoionization (lambda0=300 nm)\n')
f.close()
########################
###Run code for alternate gas absorption.
########################
if calculatealtgas:
#####Set up info about the files to extract # All are the maximum possible natural surface radiance case (z=0, albedo=fresh snow) aka "max"
N_tot=2.0925e25#total column density of Rugheimer+2015 model in cm**-2
gaslist=['h2o', 'ch4', 'so2', 'o2', 'o3', 'h2s'] #list of gases we are doing this for
base_abundances=np.array([4.657e-3, 1.647e-6, 3.548e-11, 2.241e-6, 8.846e-11, 7.097e-11]) #molar concentration of each of these gases in the Rugheimer model.
gasmultiples={}#dict holding the multiples of the molar concentration we are using
gasmultiples['h2o']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['ch4']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['so2']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7])
gasmultiples['o2']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['o3']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['h2s']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7])
#####In a loop, extract the files and compute the statistics
for gasind in range(0, len(gaslist)):
gas=gaslist[gasind]
base_abundance=base_abundances[gasind]
multiples=gasmultiples[gas]
for multind in range(0, len(multiples)):
multiple=multiples[multind]
colden_X=base_abundance*multiple*N_tot #total column density of gas X
datafile='./TwoStreamOutput/gaslim/surface_intensities_'+gas+'limits_'+gas+'multiple='+str(multiple)+'_a=newsnow_z=0.dat'
left, right, surface_int=get_UV(datafile)
intrad100_165=integrated_radiance(left, right, surface_int, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300=integrated_radiance(left, right, surface_int, 200., 300.) #This is just an empirical gauge.
umpgly_193=ump_glycosidic_photol(left, right, surface_int, 193., False, False)
umpgly_230=ump_glycosidic_photol(left, right, surface_int,230., False, False)
umpgly_254=ump_glycosidic_photol(left, right, surface_int, 254., False, False)
tricyano254=tricyano_aqe_prodrate(left, right, surface_int, 254., False, False)
tricyano300=tricyano_aqe_prodrate(left, right, surface_int, 300., False, False)
line=np.array([gas, colden_X, intrad100_165/intrad100_165_base,intrad200_300/intrad200_300_base, umpgly_193/umpgly_193_base, umpgly_230/umpgly_230_base, umpgly_254/umpgly_254_base, tricyano254/tricyano254_base, tricyano300/tricyano300_base])
if (multind==0):
altgastable=line #need to initialize in this case
else:
altgastable=np.vstack((altgastable, line))
f=open('./Doses/'+gas+'_uv_doses.dat','w')
f.write('All Dosimeters Normalized to Space Radiation Case\n')
np.savetxt(f, altgastable, delimiter=' & ', fmt='%s', newline='\n', header='Gas & Column Density (cm-2) & Radiance (100-165 nm) & Radiance (200-300 nm) & UMP Gly Cleavage (lambda0=193nm) & UMP Gly Cleavage (lambda0=230nm) & UMP Gly Cleavage (lambda0=254nm) & CuCN3 Photoionization (lambda0=254 nm) & CuCN3 Photoionization (lambda0=300 nm)\n')
f.close()
#Wrap Up
########################
###Wrap Up
########################
plt.show()
| mit |
dtkav/naclports | ports/ipython-ppapi/kernel.py | 7 | 12026 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple shell that uses the IPython messaging system."""
# Override platform information.
import platform
platform.system = lambda: "pnacl"
platform.release = lambda: "chrome"
import time
import json
import logging
import sys
import Queue
import thread
stdin_input = Queue.Queue()
shell_input = Queue.Queue()
stdin_output = Queue.Queue()
shell_output = Queue.Queue()
iopub_output = Queue.Queue()
sys_stdout = sys.stdout
sys_stderr = sys.stderr
def emit(s):
    print >> sys_stderr, "EMITTING: %s" % (s)
    time.sleep(1)
import IPython
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.utils.traitlets import Type, Dict, Instance
from IPython.core.displayhook import DisplayHook
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod
from IPython.utils.jsonutil import json_clean, encode_images
from IPython.core.displaypub import DisplayPublisher
from IPython.config.configurable import Configurable
# module defined in shell.cc for communicating via pepper API
from pyppapi import nacl_instance
def CreateMessage(msg_type, parent_header=None, content=None):
    if parent_header is None:
        parent_header = {}
    if content is None:
        content = {}
    return {
        'header': {'msg_type': msg_type},
        'parent_header': parent_header,
        'content': content,
        'msg_type': msg_type,
    }
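# --- Illustrative usage sketch (not part of the original kernel code) ---
# CreateMessage builds a minimal IPython-style message dict, e.g.:
#   msg = CreateMessage('stream', content={'name': 'stdout', 'data': 'hi'})
#   assert msg['header']['msg_type'] == 'stream'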
class MsgOutStream(object):
    """Class that overrides stderr and stdout."""

    def __init__(self, stream_name):
        self._stream_name = stream_name
        self._parent_header = {}

    def SetParentHeader(self, parent_header):
        self._parent_header = parent_header

    def close(self):
        pass

    def flush(self):
        pass

    def write(self, string):
        iopub_output.put(CreateMessage('stream', parent_header=self._parent_header,
                                       content={'name': self._stream_name, 'data': string}))

    def writelines(self, sequence):
        for string in sequence:
            self.write(string)
# override sys.stdout and sys.stderr to broadcast on iopub
stdout_stream = MsgOutStream('stdout')
stderr_stream = MsgOutStream('stderr')
sys.stdout = stdout_stream
sys.stderr = stderr_stream
class PepperShellDisplayHook(DisplayHook):
parent_header = Dict({})
def set_parent_header(self, parent_header):
"""Set the parent for outbound messages."""
self.parent_header = parent_header
def start_displayhook(self):
self.content = {}
def write_output_prompt(self):
self.content['execution_count'] = self.prompt_count
def write_format_data(self, format_dict, md_dict=None):
self.content['data'] = encode_images(format_dict)
self.content['metadata'] = md_dict
def finish_displayhook(self):
sys.stdout.flush()
sys.stderr.flush()
iopub_output.put(CreateMessage('pyout', parent_header=self.parent_header,
content=self.content))
self.content = None
class PepperDisplayPublisher(DisplayPublisher):
parent_header = Dict({})
def set_parent_header(self, parent_header):
self.parent_header = parent_header
def _flush_streams(self):
"""flush IO Streams prior to display"""
sys.stdout.flush()
sys.stderr.flush()
def publish(self, source, data, metadata=None):
self._flush_streams()
if metadata is None:
metadata = {}
self._validate_data(source, data, metadata)
content = {}
content['source'] = source
content['data'] = encode_images(data)
content['metadata'] = metadata
iopub_output.put(CreateMessage('display_data', content=json_clean(content),
parent_header=self.parent_header))
def clear_output(self, stdout=True, stderr=True, other=True):
content = dict(stdout=stdout, stderr=stderr, other=other)
if stdout:
sys.stdout.write('\r')
if stderr:
sys.stderr.write('\r')
self._flush_streams()
iopub_output.put(CreateMessage('clear_output', content=content,
parent_header=self.parent_header))
class PepperInteractiveShell(InteractiveShell):
"""A subclass of InteractiveShell for the Pepper Messagin API."""
displayhook_class = Type(PepperShellDisplayHook)
display_pub_class = Type(PepperDisplayPublisher)
@staticmethod
def enable_gui(gui):
pass
InteractiveShellABC.register(PepperInteractiveShell)
class PepperKernel(Configurable):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(PepperInteractiveShell)
def __init__(self):
self.shell = self.shell_class.instance(parent=self)
self.shell.run_cell("""
import os
matplotlib_config_dir = '/mplconfigdir'
os.environ['XDG_CONFIG_HOME'] = matplotlib_config_dir
os.environ['TMP'] = ''
import matplotlib
import matplotlib.cbook
""")
shell = PepperKernel().shell
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
def _complete(msg):
c = msg['content']
try:
cpos = int(c['cursor_pos'])
except:
# If we don't get something that we can convert to an integer, at
# least attempt the completion guessing the cursor is at the end of
# the text, if there's any, and otherwise of the line
cpos = len(c['text'])
if cpos==0:
cpos = len(c['line'])
return shell.complete(c['text'], c['line'], cpos)
# Special message to indicate the NaCl kernel is ready.
iopub_output.put(CreateMessage('status', content={'execution_state': 'nacl_ready'}))
def _no_raw_input(self):
"""Raise StdinNotImplentedError if active frontend doesn't support
stdin."""
raise StdinNotImplementedError("raw_input was called, but this "
"frontend does not support stdin.")
def _raw_input(prompt, parent_header):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# flush the stdin socket, to purge stale replies
while True:
try:
stdin_input.get_nowait()
except Queue.Empty:
break
# Send the input request.
content = json_clean(dict(prompt=prompt))
stdin_output.put(CreateMessage('input_request', content=content,
parent_header=parent_header))
# Await a response.
while True:
try:
reply = stdin_input.get()
except Exception:
print "Invalid Message"
except KeyboardInterrupt:
# re-raise KeyboardInterrupt, to truncate traceback
raise KeyboardInterrupt
else:
break
try:
value = py3compat.unicode_to_str(reply['content']['value'])
except:
print "Got bad raw_input reply: "
print reply
value = ''
if value == '\x04':
# EOF
raise EOFError
return value
def main_loop():
execution_count = 1
while 1:
iopub_output.put(CreateMessage('status', content={'execution_state': 'idle'}))
msg = shell_input.get()
iopub_output.put(CreateMessage('status', content={'execution_state': 'busy'}))
if not 'header' in msg:
continue
request_header = msg['header']
if not 'msg_type' in request_header:
continue
msg_type = request_header['msg_type']
if msg_type == 'execute_request':
try:
content = msg[u'content']
code = content[u'code']
silent = content[u'silent']
store_history = content.get(u'store_history', not silent)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", msg)
continue
# Replace raw_input. Note that is not sufficient to replace
# raw_input in the user namespace.
if content.get('allow_stdin', False):
raw_input = lambda prompt='': _raw_input(prompt, request_header)
input = lambda prompt='': eval(raw_input(prompt))
else:
raw_input = input = lambda prompt='' : _no_raw_input()
if py3compat.PY3:
_sys_raw_input = builtin_mod.input
builtin_mod.input = raw_input
else:
_sys_raw_input = builtin_mod.raw_input
_sys_eval_input = builtin_mod.input
builtin_mod.raw_input = raw_input
builtin_mod.input = input
# Let output streams know which message the output is for
stdout_stream.SetParentHeader(request_header)
stderr_stream.SetParentHeader(request_header)
shell.displayhook.set_parent_header(request_header)
shell.display_pub.set_parent_header(request_header)
status = 'ok'
content = {}
try:
shell.run_cell(msg['content']['code'],
store_history=store_history,
silent=silent)
except Exception, ex:
status = 'error'
logging.exception('Exception occured while running cell')
finally:
# Restore raw_input.
if py3compat.PY3:
builtin_mod.input = _sys_raw_input
else:
builtin_mod.raw_input = _sys_raw_input
builtin_mod.input = _sys_eval_input
content = {'status': status,
'execution_count': execution_count}
if status == 'ok':
content['payload'] = []
content['user_variables'] = {}
content['user_expressions'] = {}
elif status == 'error':
content['ename'] = type(ex).__name__
content['evalue'] = str(ex)
content['traceback'] = []
execution_count += 1
if status == 'error':
iopub_output.put(CreateMessage('pyerr', parent_header=request_header,
content={
'execution_count': execution_count,
'ename': type(ex).__name__,
'evalue': str(ex),
'traceback': []
}
))
shell_output.put(CreateMessage('execute_reply', parent_header=request_header,
content=content))
elif msg_type == 'complete_request':
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
txt, matches = _complete(msg)
matches = {'matches' : matches,
'matched_text' : txt,
'status' : 'ok'}
matches = json_clean(matches)
shell_output.put(CreateMessage('complete_reply',
parent_header = request_header,
content = matches))
elif msg_type == 'object_info_request':
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
content = msg['content']
object_info = shell.object_inspect(content['oname'],
detail_level = content.get('detail_level', 0))
# Before we send this object over, we scrub it for JSON usage
oinfo = json_clean(object_info)
shell_output.put(CreateMessage('object_info_reply',
parent_header = request_header,
content = oinfo))
elif msg_type == 'restart':
# break out of this loop, ending this program.
# The main event loop in shell.cc will then
# run this program again.
break
elif msg_type == 'kill':
# Raise an exception so that the function
# running this script will return -1, resulting
# in no restart of this script.
raise RuntimeError
thread.start_new_thread(main_loop, ())
def deal_message(msg):
    channel = msg['stream']
    content = json.loads(msg['json'])
    queues = {'shell': shell_input, 'stdin': stdin_input}
    queue = queues[channel]
    queue.put(content)

def send_message(stream, msg):
    nacl_instance.send_raw_object({
        'stream': stream,
        'json': json.dumps(msg)
    })

while 1:
    msg = nacl_instance.wait_for_message(timeout=1, sleeptime=10000)
    try:
        deal_message(msg)
    except:
        pass

    output_streams = [
        (stdin_output, 'stdin'),
        (shell_output, 'shell'),
        (iopub_output, 'iopub')
    ]
    for msg_queue, stream in output_streams:
        msg = None
        try:
            msg = msg_queue.get_nowait()
            send_message(stream, msg)
        except Queue.Empty:
            pass
| bsd-3-clause |
aquar25/losslessh264 | plot_prior_misses.py | 40 | 1124 | # Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
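# Note (added for readability): temporal_misses() splits a prior's miss log into 100
# equal time bins and plots, per bin, the percentage of that prior's total misses that
# fall in the bin, so a flat curve near 1 means the misses are spread evenly over the file.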
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: map(lambda c: c == '0', open('/tmp/' + p).read()) for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
| bsd-2-clause |
omerwe/LEAP | leapUtils.py | 1 | 10998 | import numpy as np
from optparse import OptionParser
import scipy.linalg as la
import scipy.stats as stats
import scipy.linalg.blas as blas
import pandas as pd
import csv
import time
import fastlmm.util.VertexCut as vc
from pysnptools.snpreader.bed import Bed
import pysnptools.util as pstutil
import pysnptools.util.pheno as phenoUtils
np.set_printoptions(precision=3, linewidth=200)
def loadData(bfile, extractSim, phenoFile, missingPhenotype='-9', loadSNPs=False, standardize=True):
bed = Bed(bfile, count_A1=True)
if (extractSim is not None):
f = open(extractSim)
csvReader = csv.reader(f)
extractSnpsSet = set([])
for l in csvReader: extractSnpsSet.add(l[0])
f.close()
keepSnpsInds = [i for i in range(bed.sid.shape[0]) if bed.sid[i] in extractSnpsSet]
bed = bed[:, keepSnpsInds]
phe = None
if (phenoFile is not None): bed, phe = loadPheno(bed, phenoFile, missingPhenotype)
if (loadSNPs):
bed = bed.read()
if (standardize): bed = bed.standardize()
return bed, phe
def loadPheno(bed, phenoFile, missingPhenotype='-9', keepDict=False):
pheno = phenoUtils.loadOnePhen(phenoFile, missing=missingPhenotype, vectorize=True)
checkIntersection(bed, pheno, 'phenotypes')
bed, pheno = pstutil.intersect_apply([bed, pheno])
if (not keepDict): pheno = pheno['vals']
return bed, pheno
def checkIntersection(bed, fileDict, fileStr, checkSuperSet=False):
bedSet = set((b[0], b[1]) for b in bed.iid)
fileSet = set((b[0], b[1]) for b in fileDict['iid'])
if checkSuperSet:
if (not fileSet.issuperset(bedSet)): raise Exception(fileStr + " file does not include all individuals in the bfile")
intersectSet = bedSet.intersection(fileSet)
if (len(intersectSet) != len (bedSet)):
print(len(intersectSet), 'individuals appear in both the plink file and the', fileStr, 'file')
def symmetrize(a):
return a + a.T - np.diag(a.diagonal())
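# Note: blas.dsyrk(..., lower=1), used below, fills only the lower triangle of X*X',
# so symmetrize() mirrors it into a full symmetric matrix (the diagonal is added twice
# by a + a.T and subtracted back once).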
def loadRelatedFile(bed, relFile):
relatedDict = phenoUtils.loadOnePhen(relFile, vectorize=True)
checkIntersection(bed, relatedDict, 'relatedness', checkSuperSet=True)
_, relatedDict = pstutil.intersect_apply([bed, relatedDict])
related = relatedDict['vals']
keepArr = (related < 0.5)
print(np.sum(~keepArr), 'individuals will be removed due to high relatedness')
return keepArr
def findRelated(bed, cutoff, kinshipFile=None):
if (kinshipFile is None):
print('Computing kinship matrix...')
t0 = time.time()
XXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1) / bed.val.shape[1])
print('Done in %0.2f'%(time.time()-t0), 'seconds')
else:
XXT = np.loadtxt(kinshipFile)
#Find related individuals
removeSet = set(np.sort(vc.VertexCut().work(XXT, cutoff))) #These are the indexes of the IIDs to remove
print('Marking', len(removeSet), 'individuals to be removed due to high relatedness')
#keepArr = np.array([(1 if iid in keepSet else 0) for iid in bed.iid], dtype=bool)
keepArr = np.ones(bed.iid.shape[0], dtype=bool)
for i in removeSet: keepArr[i] = False
return keepArr
def eigenDecompose(XXT, ignore_neig=False):
t0 = time.time()
print('Computing eigendecomposition...')
s,U = la.eigh(XXT)
if (not ignore_neig and (np.min(s) < -1e-4)): raise Exception('Negative eigenvalues found')
s[s<0]=0
ind = np.argsort(s)
ind = ind[s>1e-12]
U = U[:, ind]
s = s[ind]
print('Done in %0.2f'%(time.time()-t0), 'seconds')
return s,U
def loadCovars(bed, covarFile):
covarsDict = phenoUtils.loadPhen(covarFile)
checkIntersection(bed, covarsDict, 'covariates', checkSuperSet=True)
_, covarsDict = pstutil.intersect_apply([bed, covarsDict])
covar = covarsDict['vals']
return covar
def getSNPCovarsMatrix(bed, resfile, pthresh, mindist):
snpNameToNumDict = dict([])
for i,s in enumerate(bed.sid): snpNameToNumDict[s] = i
f = open(resfile)
csvReader = csv.reader(f, delimiter="\t")
next(csvReader)
significantSNPs = []
significantSNPNames = []
lastPval = 0
featuresPosList = []
for l in csvReader:
snpName, pVal = l[0], float(l[4])
		if (pVal < lastPval): raise Exception('P-values are not sorted in ascending order: ' + str(pVal) + " < " + str(lastPval))
lastPval = pVal
if (pVal > pthresh): break
if (snpName not in snpNameToNumDict): continue
significantSNPNames.append(snpName)
if (mindist == 0):
significantSNPs.append(snpNameToNumDict[snpName])
print('Using SNP', snpName, 'with p<%0.2e'%pVal, 'as a fixed effect')
else:
posArr = bed.pos[snpNameToNumDict[snpName]]
chrom, pos = posArr[0], int(posArr[2])
addSNP = True
for (c,p) in featuresPosList:
if (chrom == c and abs(pos-p) < mindist):
addSNP = False
break
if addSNP:
significantSNPs.append(snpNameToNumDict[snpName])
featuresPosList.append((chrom, pos))
print('Using SNP', snpName, '('+str(int(chrom))+':'+str(pos)+') with p<%0.2e'%pVal, 'as a fixed effect')
f.close()
snpCovarsMat = bed.val[:, significantSNPs]
return snpCovarsMat
def getExcludedChromosome(bfile, chrom):
bed = Bed(bfile, count_A1=True)
indsToKeep = (bed.pos[:,0] != chrom)
bed = bed[:, indsToKeep]
return bed.read().standardize()
def getChromosome(bfile, chrom):
bed = Bed(bfile, count_A1=True)
indsToKeep = (bed.pos[:,0] == chrom)
bed = bed[:, indsToKeep]
return bed.read().standardize()
def _fixupBedAndPheno(bed, pheno, missingPhenotype='-9'):
bed = _fixupBed(bed)
bed, pheno = _fixup_pheno(pheno, bed, missingPhenotype)
return bed, pheno
def _fixupBed(bed):
if isinstance(bed, str):
return Bed(bed, count_A1=True).read().standardize()
else: return bed
def _fixup_pheno(pheno, bed=None, missingPhenotype='-9'):
if (isinstance(pheno, str)):
if (bed is not None):
bed, pheno = loadPheno(bed, pheno, missingPhenotype, keepDict=True)
return bed, pheno
else:
phenoDict = phenoUtils.loadOnePhen(pheno, missing=missingPhenotype, vectorize=True)
return phenoDict
else:
if (bed is not None): return bed, pheno
else: return pheno
def linreg(bed, pheno):
#Extract snps and phenotype
bed, pheno = _fixupBedAndPheno(bed, pheno)
if isinstance(pheno, dict): phe = pheno['vals']
else: phe = pheno
if (len(phe.shape)==2):
if (phe.shape[1]==1): phe=phe[:,0]
else: raise Exception('More than one phenotype found')
#Normalize y. We assume X is already normalized.
y = phe - phe.mean(); y /= y.std()
#Compute p-values
Xy = bed.val.T.dot(y) / y.shape[0]
Xy[Xy>1.0] = 1.0
Xy[Xy<-1.0] = -1.0
df = y.shape[0]-2
TINY = 1.0e-20
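	# With X assumed normalized (see the comment above) and y standardized here, X'y / n is the
	# per-SNP marginal correlation r, and t = r * sqrt(df / (1 - r^2)) with df = n - 2 converts it
	# into a t statistic; the TINY terms only guard against division by zero when |r| = 1.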
t = Xy * np.sqrt(df / ((1.0-Xy+TINY) * (1.0+Xy+TINY)))
pValT = stats.t.sf(np.abs(t), df)*2
#Create pandas data frame
items = [
('SNP', bed.sid),
('Chr', bed.pos[:,0]),
('GenDist', bed.pos[:,1]),
('ChrPos', bed.pos[:,2]),
('PValue', pValT),
]
frame = pd.DataFrame.from_items(items)
frame.sort("PValue", inplace=True)
frame.index = np.arange(len(frame))
return frame
def powerPlot(df, causalSNPs, title=''):
import pylab
causalSNPs = set(causalSNPs)
csnpPvals = df[df['SNP'].isin(causalSNPs)]["PValue"]
pvalPoints = np.logspace(-6, -2, num=1000)
power = [np.mean(csnpPvals < p ) for p in list(pvalPoints)]
pylab.plot(-np.log10(pvalPoints), power)
pylab.xlabel("-log10(Significance Threshold)")
pylab.ylabel("Power")
pylab.title(title)
def computeCovar(bed, shrinkMethod, fitIndividuals):
eigen = dict([])
if (shrinkMethod in ['lw', 'oas', 'l1', 'cv']):
import sklearn.covariance as cov
t0 = time.time()
print('Estimating shrunk covariance using', shrinkMethod, 'estimator...')
if (shrinkMethod == 'lw'): covEstimator = cov.LedoitWolf(assume_centered=True, block_size = 5*bed.val.shape[0])
elif (shrinkMethod == 'oas'): covEstimator = cov.OAS(assume_centered=True)
elif (shrinkMethod == 'l1'): covEstimator = cov.GraphLassoCV(assume_centered=True, verbose=True)
elif (shrinkMethod == 'cv'):
shrunkEstimator = cov.ShrunkCovariance(assume_centered=True)
param_grid = {'shrinkage': [0.01, 0.1, 0.3, 0.5, 0.7, 0.9, 0.99]}
			import sklearn.grid_search
			covEstimator = sklearn.grid_search.GridSearchCV(shrunkEstimator, param_grid)
else: raise Exception('unknown covariance regularizer')
covEstimator.fit(bed.val[fitIndividuals, :].T)
if (shrinkMethod == 'l1'):
alpha = covEstimator.alpha_
print('l1 alpha chosen:', alpha)
covEstimator2 = cov.GraphLasso(alpha=alpha, assume_centered=True, verbose=True)
else:
			if (shrinkMethod == 'cv'): shrinkEstimator = covEstimator.best_params_['shrinkage']
else: shrinkEstimator = covEstimator.shrinkage_
print('shrinkage estimator:', shrinkEstimator)
covEstimator2 = cov.ShrunkCovariance(shrinkage=shrinkEstimator, assume_centered=True)
covEstimator2.fit(bed.val.T)
XXT = covEstimator2.covariance_ * bed.val.shape[1]
print('Done in %0.2f'%(time.time()-t0), 'seconds')
else:
print('Computing kinship matrix...')
t0 = time.time()
XXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1))
print('Done in %0.2f'%(time.time()-t0), 'seconds')
try: shrinkParam = float(shrinkMethod)
except: shrinkParam = -1
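	# The 'mylw' branch below appears to compute a Ledoit-Wolf-style shrinkage intensity:
	# sE2R estimates the dispersion of the off-diagonal sample covariances, sER2 their sampling
	# noise, and the resulting shrinkParam blends X*X' with a scaled identity matrix.
	# (Reading aid inferred from the code, not a formal derivation.)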
if (shrinkMethod == 'mylw'):
XXT_fit = XXT[np.ix_(fitIndividuals, fitIndividuals)]
sE2R = (np.sum(XXT_fit**2) - np.sum(np.diag(XXT_fit)**2)) / (bed.val.shape[1]**2)
#temp = (bed.val**2).dot((bed.val.T)**2)
temp = symmetrize(blas.dsyrk(1.0, bed.val[fitIndividuals, :]**2, lower=1))
sER2 = (temp.sum() - np.diag(temp).sum()) / bed.val.shape[1]
shrinkParam = (sER2 - sE2R) / (sE2R * (bed.val.shape[1]-1))
if (shrinkParam > 0):
print('shrinkage estimator:', 1-shrinkParam)
XXT = (1-shrinkParam)*XXT + bed.val.shape[1]*shrinkParam*np.eye(XXT.shape[0])
return XXT
def standardize(X, method, optionsDict):
fitIndividuals = np.ones(X.shape[0], dtype=np.bool)
if (method == 'frq'):
empMean = X.mean(axis=0) / 2.0
X[:, empMean>0.5] = 2 - X[:, empMean>0.5]
		print('regularizing SNPs according to frq file...')
frqFile = (optionsDict['bfilesim']+'.frq' if (optionsDict['frq'] is None) else optionsDict['frq'])
mafs = np.loadtxt(frqFile, usecols=[1,2]).mean(axis=1)
snpsMean = 2*mafs
snpsStd = np.sqrt(2*mafs*(1-mafs))
elif (method == 'related'):
if (optionsDict['related'] is None): raise Exception('related file not supplied')
		print('regularizing SNPs according to non-related individuals...')
relLines = np.loadtxt(optionsDict['related'], usecols=[2])
keepArr = (relLines != 1)
print('Excluding', np.sum(~keepArr), 'from the covariance matrix standardization')
snpsMean = X[keepArr, :].mean(axis=0)
snpsStd = X[keepArr, :].std(axis=0)
fitIndividuals = keepArr
elif (method == 'controls'):
phe = optionsDict['pheno']
pheThreshold = phe.mean()
controls = (phe<pheThreshold)
		print('regularizing SNPs according to controls...')
snpsMean = X[controls, :].mean(axis=0)
snpsStd = X[controls, :].std(axis=0)
fitIndividuals = controls
elif (method is None):
snpsMean = X.mean(axis=0)
snpsStd = X.std(axis=0)
else:
raise Exception('unknown SNP standardization option: ' + method)
	X -= snpsMean
X /= snpsStd
return X, fitIndividuals
| apache-2.0 |
puruckertom/ubertool | ubertool/varroapop/varroapop_functions.py | 1 | 11627 | from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import json
import requests
import math
import pandas as pd
import os
rest_url_varroapop = os.environ.get('OPENCPU_REST_SERVER')
#rest_url_varroapop = 'http://localhost'
if not os.environ.get('OPENCPU_REST_SERVER'):
rest_url_varroapop = 'http://172.20.100.18:5656'
class VarroapopFunctions(object):
"""
Function class for Stir.
"""
def __init__(self):
"""Class representing the functions for VarroaPop"""
super(VarroapopFunctions, self).__init__()
def call_varroapop_api(self):
logging.info("=========== formatting Varroapop JSON payload")
input_json = self.format_varroapop_payload()
logging.info("=========== calling Varroapop windows REST API")
called_endpoint = (rest_url_varroapop + '/ocpu/apps/quanted/VarroaPopWrapper/R/RunVarroaPop/json')
logging.info(called_endpoint)
http_headers = {'Content-Type': 'application/json'}
logging.info("JSON payload:")
print(input_json)
return requests.post(called_endpoint, headers=http_headers, data=input_json, timeout=60)
def fill_model_out_attr(self, output_json):
outputs = json.loads(json.loads(output_json)[0])
self.out_date = self.out_date.append(pd.Series(outputs.get('Date')))
self.out_colony_size = self.out_colony_size.append(pd.Series(outputs.get('Colony.Size')))
self.out_adult_drones = self.out_adult_drones.append(pd.Series(outputs.get('Adult.Drones')))
self.out_adult_workers = self.out_adult_workers.append(pd.Series(outputs.get('Adult.Workers')))
self.out_foragers = self.out_foragers.append(pd.Series(outputs.get('Foragers')))
self.out_capped_drone_brood = self.out_capped_drone_brood.append(pd.Series(outputs.get('Capped.Drone.Brood')))
self.out_capped_worker_brood = self.out_capped_worker_brood.append(pd.Series(outputs.get('Capped.Worker.Brood')))
self.out_drone_larvae = self.out_drone_larvae.append(pd.Series(outputs.get('Drone.Larvae')))
self.out_worker_larvae =self.out_worker_larvae.append(pd.Series(outputs.get('Worker.Larvae')))
self.out_drone_eggs = self.out_drone_eggs.append(pd.Series(outputs.get('Drone.Eggs')))
self.out_worker_eggs = self.out_worker_eggs.append(pd.Series(outputs.get('Worker.Eggs')))
self.out_free_mites = self.out_free_mites.append(pd.Series(outputs.get('Free.Mites')))
self.out_drone_brood_mites =self.out_drone_brood_mites.append(pd.Series(outputs.get('Drone.Brood.Mites')))
self.out_worker_brood_mites =self.out_worker_brood_mites.append(pd.Series(outputs.get('Worker.Brood.Mites')))
self.out_drone_mites_per_cell = self.out_drone_mites_per_cell.append(pd.Series(outputs.get('Mites.Drone.Cell')))
self.out_worker_mites_per_cell = self.out_worker_mites_per_cell.append(pd.Series(outputs.get('Mites.Worker.Cell')))
self.out_mites_dying = self.out_mites_dying.append(pd.Series(outputs.get('Mites.Dying')))
self.out_proportion_mites_dying =self.out_proportion_mites_dying.append(pd.Series(outputs.get('Proportion.Mites.Dying')))
self.out_colony_pollen = self.out_colony_pollen.append(pd.Series(outputs.get('Colony.Pollen..g.')))
self.out_chemical_conc_pollen =self.out_chemical_conc_pollen.append(pd.Series(outputs.get('Pollen.Pesticide.Concentration')))
self.out_colony_nectar = self.out_colony_nectar.append(pd.Series(outputs.get('Colony.Nectar')))
self.out_chemical_conc_nectar =self.out_chemical_conc_nectar.append(pd.Series(outputs.get('Nectar.Pesticide.Concentration')))
self.out_dead_drone_larvae = self.out_dead_drone_larvae.append(pd.Series(outputs.get('Dead.Drone.Larvae')))
self.out_dead_worker_larvae =self.out_dead_worker_larvae.append(pd.Series(outputs.get('Dead.Worker.Larvae')))
self.out_dead_drone_adults = self.out_dead_drone_adults.append(pd.Series(outputs.get('Dead.Drone.Adults')))
self.out_dead_worker_adults = self.out_dead_worker_adults.append(pd.Series(outputs.get('Dead.Worker.Adults')))
self.out_dead_foragers = self.out_dead_foragers.append(pd.Series(outputs.get('Dead.Foragers')))
self.out_queen_strength = self.out_queen_strength.append(pd.Series(outputs.get('Queen.Strength')))
self.out_average_temp_c = self.out_average_temp_c.append(pd.Series(outputs.get('Average.Temperature..celsius.')))
self.out_rain_inch = self.out_rain_inch.append(pd.Series(outputs.get('Rain')))
def fill_summary_stats(self):
self.out_mean_colony_size = self.out_mean_colony_size.append(pd.Series(self.out_colony_size.mean()))
self.out_max_colony_size = self.out_max_colony_size.append(pd.Series(self.out_colony_size.max()))
self.out_min_colony_size = self.out_min_colony_size.append(pd.Series(self.out_colony_size.min()))
self.out_total_bee_mortality = self.out_total_bee_mortality.append(pd.Series(sum([self.out_dead_drone_adults.sum(),
self.out_dead_drone_larvae.sum(),
self.out_dead_worker_adults.sum(),
self.out_dead_worker_larvae.sum(),
self.out_dead_foragers.sum()])))
self.out_max_chemical_conc_pollen = self.out_max_chemical_conc_pollen.append(pd.Series(self.out_chemical_conc_pollen.max()))
self.out_max_chemical_conc_nectar = self.out_max_chemical_conc_nectar.append(pd.Series(self.out_chemical_conc_nectar.max()))
def fill_sessionid(self, sessionid):
self.out_api_sessionid = self.out_api_sessionid.append(pd.Series(sessionid))
def format_varroapop_payload(self):
input_dict = self.pd_obj.to_dict('records')[0]
weather_loc = input_dict.pop('weather_location')
print('Weather location: '+ weather_loc )
input_dict = self.collapse_dates(input_dict)
input_dict = self.rename_inputs(input_dict)
input_dict = self.remove_unused_inputs(input_dict)
data = json.dumps({'parameters':input_dict, 'weather_file':weather_loc})
return data
    def collapse_dates(self, input_dict):
        # Collapse each *_month/*_day/*_year triplet into a single "m/d/yyyy" string
        # and drop the individual component keys afterwards.
        date_fields = ['SimStart', 'SimEnd', 'RQReQueenDate', 'ImmStart', 'ImmEnd',
                       'VTTreatmentStart', 'FoliarAppDate', 'FoliarForageBegin',
                       'FoliarForageEnd', 'SoilForageBegin', 'SoilForageEnd',
                       'SeedForageBegin', 'SeedForageEnd', 'SupPollenBegin',
                       'SupPollenEnd', 'SupNectarBegin', 'SupNectarEnd']
        inputs_to_remove = []
        for field in date_fields:
            component_keys = [field + suffix for suffix in ('_month', '_day', '_year')]
            input_dict[field] = "/".join([str(int(input_dict.get(key))) for key in component_keys])
            inputs_to_remove.extend(component_keys)
        [input_dict.pop(k, None) for k in inputs_to_remove]
        return input_dict
def rename_inputs(self, input_dict):
input_dict['EAppRate'] = input_dict.pop('ar_lb')
input_dict['AIKOW'] = math.exp(input_dict.pop('l_kow'))
input_dict['AIKOC'] = input_dict.pop('k_oc')
return input_dict
def remove_unused_inputs(self, input_dict):
keys = list(input_dict.keys())
to_remove = [i for i in keys if i[0].islower()]
for k in to_remove:
input_dict.pop(k, None)
return input_dict
def get_input_file(self, api_sessionid):
file_endpoint = (rest_url_varroapop + '/ocpu/tmp/' + api_sessionid + '/files/')
return requests.get(file_endpoint+'vp_input.txt')
def get_log_file(self, api_sessionid):
file_endpoint = (rest_url_varroapop + '/ocpu/tmp/' + api_sessionid + '/files/')
return requests.get(file_endpoint+'vp_log.txt')
def get_results_file(self, api_sessionid):
file_endpoint = (rest_url_varroapop + '/ocpu/tmp/' + api_sessionid + '/files/')
return requests.get(file_endpoint+'vp_results.txt')
| unlicense |
lobnek/pyutil | test/test_mongo/test_engine/test_strategy.py | 1 | 3434 | from pyutil.mongo.engine.strategy import Strategy, strategies, configuration
from pyutil.mongo.engine.symbol import Symbol, Group
from pyutil.performance.drawdown import drawdown
from pyutil.performance.month import monthlytable
from pyutil.performance.return_series import from_nav
from pyutil.portfolio.portfolio import similar
import pandas.testing as pt
from test.config import *
@pytest.fixture()
def group():
Group.objects.delete()
return Group(name="US Equity").save()
@pytest.fixture()
def symbols(group, portfolio):
Symbol.objects.delete()
# add the symbols to database
for symbol in portfolio.assets:
Symbol(name=symbol, group=group).save()
def test_strategy(symbols, portfolio):
Strategy.objects.delete()
s = Strategy(name="mdt", type="mdt", active=True, source="AAA")
assert s.source == "AAA"
assert s.type == "mdt"
assert s.active
assert s.portfolio is None
assert s.last_valid_index is None
# empty dictionary as portfolio hasn't been set
assert Strategy.portfolios(strategies=[s]) == {}
s.save()
frame = Strategy.reference_frame()
assert frame.index.name == "strategy"
s.portfolio = portfolio
pt.assert_frame_equal(s.portfolio.weights, portfolio.weights)
pt.assert_frame_equal(s.portfolio.prices, portfolio.prices)
s.save()
similar(Strategy.portfolios(strategies=[s])["mdt"], portfolio)
navs = Strategy.navs()
assert not navs["mdt"].empty
frame = Strategy.sectors(strategies=[s])
assert frame.index.name == "Portfolio"
assert set(frame.keys()) == {"US Equity", "Total"}
assert frame.loc["mdt"]["US Equity"] == pytest.approx(0.308974, abs=1e-5)
def test_source(portfolio):
with open(resource("source.py"), "r") as f:
s = Strategy(name="Peter", source=f.read(), active=True, type="wild")
# construct the configuration based on the strategy (and it's source code)
c = configuration(strategy=s)
# verify the names of the configuration
assert c.names == portfolio.assets
# also possible to ask the strategy directly
assert s.assets == portfolio.assets
def test_last_valid(portfolio):
s = Strategy(name="Maffay", source="AAA", active=True, type="wild2")
s.portfolio = portfolio
assert s.last_valid_index == portfolio.prices.last_valid_index()
assert similar(s.portfolio, portfolio)
def test_strategies():
folder = resource(name="strat")
for name, source in strategies(folder=folder):
assert name in {"P1", "P2"}
def test_active():
Strategy.objects.delete()
Strategy(name="A", active=False).save()
Strategy(name="B", active=True).save()
assert len(Strategy.active_strategies()) == 1
assert len(Strategy.objects) == 2
def test_drawdown(portfolio):
Strategy.objects.delete()
s = Strategy(name="Maffay", source="")
s.portfolio = portfolio
pt.assert_series_equal(drawdown(portfolio.nav), s.drawdown)
def test_volatility(portfolio):
Strategy.objects.delete()
s = Strategy(name="Maffay", source="")
s.portfolio = portfolio
pt.assert_series_equal(from_nav(portfolio.nav).ewm_volatility().dropna(), s.ewm_volatility())
def test_monthlytable(portfolio):
Strategy.objects.delete()
s = Strategy(name="Maffay", source="")
s.portfolio = portfolio
pt.assert_frame_equal(monthlytable(portfolio.nav.pct_change()), s.monthlytable) | mit |
carlvlewis/bokeh | bokeh/charts/builder/tests/test_line_builder.py | 33 | 2376 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Line
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestLine(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_jython'], y_jython)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Line, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['x'], [0, 1, 2, 3, 4])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
lamotriz/sistemas-de-aterramento | src/agilent_u2531a.py | 1 | 14700 | # -*- coding: utf-8 -*-
# Communication with the Agilent U2531A data acquisition board
#
# UFC - Universidade Federal do Ceará (Federal University of Ceará)
#
# Authors:
#   Felipe Bandeira da Silva
#   Francisco Alexander
#
from __future__ import division
import platform
#if platform.system() == 'Windows':
# import visa
#else:
# import visa_linux_emulation as visa
try:
import visa
except:
    # During a normal installation with NSIS the Windows PATH was not yet updated
    # with Python, so pip could not be run during the install to pull in "pyvisa"
    # (whose dependencies pip would normally resolve automatically). Because of
    # that, the first run of the program needs an internet connection.
    #
    # For everything to work correctly PyVISA 1.4 is required.
#import pip
#pip.main(['install', 'pyvisa'])
import subprocess
    print u"warning: installing PyVISA 1.4"
subprocess.call(['pip', 'install', 'PyVISA==1.4'])
    print u"warning: installation finished"
import visa
import matplotlib.pyplot as plt
from time import sleep, time, asctime, localtime
import numpy as np
###############################################################################
# Correction constants. The same values used by the LabVIEW program.
###############################################################################
FATOR_CORRECAO_TENSAO = 100
FATOR_CORRECAO_CORRENTE = 2.71
# 0 - do not show messages
# 1 - show debug messages
DEBUG = 0
# A small spurious pulse (pure noise) shows up at the start of the
# acquisition. To display the signal correctly it was necessary to
# increase the number of acquired points, which makes each
# acquisition take longer.
#QUANTIDADE_PONTOS = 50000
QUANTIDADE_PONTOS = 800000
###############################################################################
# testBit() returns a nonzero result, 2**offset, if the bit at 'offset' is one.
def testBit(int_type, offset):
mask = 1 << offset
return(int_type & mask)
# setBit() returns an integer with the bit at 'offset' set to 1.
def setBit(int_type, offset):
mask = 1 << offset
return(int_type | mask)
# clearBit() returns an integer with the bit at 'offset' cleared.
def clearBit(int_type, offset):
mask = ~(1 << offset)
return(int_type & mask)
# toggleBit() returns an integer with the bit at 'offset' inverted, 0 -> 1 and 1 -> 0.
def toggleBit(int_type, offset):
mask = 1 << offset
return(int_type ^ mask)
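# Worked examples for the bit helpers above (values checked by hand):
#   testBit(0b1010, 1)   -> 2  (nonzero, so bit 1 is set)
#   setBit(0b1000, 1)    -> 10 (0b1010)
#   clearBit(0b1010, 1)  -> 8  (0b1000)
#   toggleBit(0b1010, 3) -> 2  (0b0010)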
def lerEndian(data):
"""
    Converts a raw byte sequence into 2-byte sample values.
    The input is little-endian, with the 13th bit acting as the carry/sign flag.
    Input:
        data = raw string holding the data block
    Output:
        t = number of decoded values
        v = list with the decoded values
"""
raw = data[10:]
valores = []
passo = 0
for i in raw:
if passo == 0:
lsb = i
passo = 1
elif passo == 1:
msb = i
passo = 0
num = ((ord(msb)<<8)+(ord(lsb)))>>2
#print hex(num)
valores.append(num)
return [len(valores), valores]
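# Decoding example for lerEndian()/ler2Endian(): the 10 characters skipped by
# data[10:] hold the instrument's block header (e.g. "#8" followed by an 8-digit
# byte count), and each following pair lsb=0x04, msb=0x80 becomes
# ((0x80 << 8) + 0x04) >> 2 = 0x2001, i.e. a right-aligned 14-bit sample.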
def ler2Endian(data):
"""
    Reads a byte block containing two simultaneously sampled channels (interleaved).
"""
raw = data[10:]
A = []
B = []
passo = 0
for i in raw:
if passo == 0:
lsb = i
passo = 1
elif passo == 1:
msb = i
passo = 2
A.append(((ord(msb)<<8)+(ord(lsb)))>>2)
elif passo == 2:
lsb = i
passo = 3
elif passo == 3:
msb = i
passo = 0
B.append(((ord(msb)<<8)+(ord(lsb)))>>2)
return [len(A), A, B]
def convBIP(raw, range_ad=10, resolution=14):
v = []
for i in raw:
v.append( (2*i)/(2**resolution) * range_ad )
return v
def convUNI(raw, range_ad=10, resolution=14):
v = []
for i in raw:
        # If bit 13 of the word is 1 the number is "negative".
        # The unipolar conversion follows:
        #  MAX   = 1FFF
        #  MAX/2 = 0000
        #  0     = 2000
if testBit(i, 13) > 0:
valor = clearBit(i, 13) - (2**14)/2
v.append( (valor/(2**resolution) + 0.5)*range_ad )
else:
v.append( (i/(2**resolution) + 0.5)*range_ad )
return v
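# Illustrative self-check (not part of the original acquisition flow; the helper name
# below is new): with the default 14-bit resolution and a 10 V range the unipolar
# mapping gives 0x2000 -> 0.0 V, 0x0000 -> 5.0 V and 0x1FFF -> ~10.0 V, matching the
# MAX / MAX/2 / zero codes listed in the comments above.
def _exemplo_convUNI():
    return convUNI([0x2000, 0x0000, 0x1FFF], range_ad=10)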
def lerTensaoCorrente(ag):
"""
    Reads two channels simultaneously:
    channel 101 (current) and channel 102 (voltage).
"""
    # reset the acquisition board
ag.write("*CLS")
ag.write("*RST")
    ag.write("ROUT:ENAB 0,(@103, 104)") # disable channels 103 and 104
    ag.write("ROUT:ENAB 1,(@101, 102)") # enable channels 101 and 102
    ag.write("ROUT:CHAN:RANG 10,(@101, 102)") # same range as the National Instruments program
    ag.write("ROUT:CHAN:POL UNIP,(@101, 102)") # unipolar
    ag.write("ACQ:SRAT 2000000") # sampling rate
    #ag.write("ACQ:POIN 2000000")
    #ag.write("ACQ:POIN 50000") # number of points to acquire
ag.write("ACQ:POIN %d" % QUANTIDADE_PONTOS)
####################
    # start acquisition #
####################
ag.write("DIG")
disparaTensao(ag)
#ag.write("DIG")
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
        sleep(0.2) # wait a moment until the sample is ready
    # A small change in the first 555 timer's capacitor means the SET and
    # RESET pulses need a longer interval for both to take effect.
sleep(.2)
retiraTensao(ag)
ag.write("WAV:DATA?")
dados = ag.read()
t, I, V = ler2Endian(dados)
V = convUNI(V, 10)
I = convUNI(I, 10)
return [dados, V, I]
def lerTensao(ag):
"""
    Reads only the supply voltage channel (channel 102),
    running the full SET/RESET firing sequence.
"""
# reset
ag.write("*CLS")
ag.write("*RST")
    # start reading channel 102 (voltage)
ag.write("ROUT:ENAB 0,(@103, 101, 104)")
ag.write("ROUT:ENAB 1,(@102)")
    ag.write("ROUT:CHAN:RANG 10,(@102)") # same range as the National Instruments program
ag.write("ROUT:CHAN:POL UNIP,(@102)")
ag.write("ACQ:SRAT 2000000")
#ag.write("ACQ:POIN 2000000")
#ag.write("ACQ:POIN 50000")
    # A small spurious pulse (pure noise) shows up at the start of the
    # acquisition. To display the signal correctly it was necessary to
    # increase the number of acquired points, which makes each
    # acquisition take longer.
ag.write("ACQ:POIN %d" % (QUANTIDADE_PONTOS))
    # start acquisition
ag.write("DIG")
disparaTensao(ag)
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
sleep(0.5)
ag.write("WAV:DATA?")
dados = ag.read()
sleep(.2)
retiraTensao(ag)
#print dados
t, R = lerEndian(dados)
V = convUNI(R, 10)
plt.grid()
plt.plot(range(0, t), V)
plt.show()
return t, V
def lerCorrente(ag):
"""
    Reads only the supply current channel (channel 101),
    running the full SET/RESET firing sequence.
"""
# reset
ag.write("*CLS")
ag.write("*RST")
    # start reading channel 101 (current)
ag.write("ROUT:ENAB 0,(@103, 102, 104)")
ag.write("ROUT:ENAB 1,(@101)")
ag.write("ROUT:CHAN:RANG 10,(@101)")
ag.write("ROUT:CHAN:POL UNIP,(@101)")
ag.write("ACQ:SRAT 2000000")
ag.write("ACQ:POIN 2000000")
    # start acquisition
ag.write("DIG")
disparaTensao(ag)
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
sleep(0.5)
ag.write("WAV:DATA?")
dados = ag.read()
sleep(.2)
retiraTensao(ag)
#print dados
t, R = lerEndian(dados)
V = convUNI(R, 10)
plt.grid()
plt.plot(range(0, t), V)
plt.show()
return t, V
def lerCanal103(ag):
"""
    This channel was used for the initial tests of the analog-to-digital
    conversion and is no longer needed. The voltage and current read
    functions are identical to this one, only the channel changes.
"""
# reset
ag.write("*CLS")
ag.write("*RST")
    # start reading channel 103
ag.write("ROUT:ENAB 0,(@101, 102, 104)")
ag.write("ROUT:ENAB 1,(@103)")
ag.write("ROUT:CHAN:RANG 10,(@103)")
#ag.write("ROUT:CHAN:POL BIP,(@103)")
ag.write("ROUT:CHAN:POL UNIP,(@103)")
ag.write("ACQ:SRAT 2000000")
ag.write("ACQ:POIN 2000000")
    # start acquisition
ag.write("DIG")
    # wait for completion
disparaTensao(ag)
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
sleep(0.1)
ag.write("WAV:DATA?")
dados = ag.read()
sleep(.2)
retiraTensao(ag)
#print dados
t, R = lerEndian(dados)
V = convUNI(R)
plt.grid()
plt.plot(range(0, t), V)
return t, V
def disparaTensao(ag):
"""
    Sends a high-voltage pulse to the grounding system by triggering the
    first 555 timer. Pulses must not be fired in quick succession, since
    the supply was not designed for that, so take care when sequencing
    the triggers.
    SET   - Pin 68 on the U2901-60602 board
    RESET - Pin 34 on the U2901-60602 board
"""
ag.write("CONF:DIG:DIR OUTP,(@501)")
ag.write("SOUR:DIG:DATA 1,(@501)")
return 0
def retiraTensao(ag):
"""
    Resets the supply, enabling it to send a new
    high-voltage pulse.
"""
ag.write("CONF:DIG:DIR OUTP,(@501)")
    ag.write("SOUR:DIG:DATA 0,(@501)") # release the SET line
    sleep(0.1) # wait a moment before resetting
    ag.write("SOUR:DIG:DATA 2,(@501)") # reset the supply
    sleep(0.1) # wait a moment to settle back to idle
    ag.write("SOUR:DIG:DATA 0,(@501)") # back to idle
return 0
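# Typical firing sequence, as used by the read functions above (see lerTensaoCorrente):
# arm the digitizer with "DIG", call disparaTensao() to raise the SET line (bit 0 of
# digital port 501), wait for the acquisition to finish, then call retiraTensao() to
# pulse the RESET line (bit 1) and leave the supply idle before the next shot.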
def pltTensaoCorrente(V, I):
t1 = np.arange(0, len(V))
plt.figure(1)
    plt.title("U2531A readings")
plt.subplot(211)
plt.plot(t1, V)
plt.subplot(212)
plt.plot(t1, I)
plt.show()
def aplicaCorrecoes(V, I):
V = np.array(V)
V = FATOR_CORRECAO_TENSAO * V
I = np.array(I)
I = FATOR_CORRECAO_CORRENTE * I
return [V, I]
def sequenciaAquisicoes(ag, quantidade, local="C:\\Temp", rotulo = '0'):
"""
    Performs a sequential acquisition of the voltage and current channels.
    ag = object used to control the board
"""
    print "Starting sequential acquisition"
    print "Instrument = ", ag
    print "count = ", quantidade
    print "Start time = ", asctime()
tempoInicio = time()
contagem = quantidade
plt.figure(1)
while quantidade > 0:
        print "Current = ", quantidade
        tempoIndividual = time()
        # start acquisition
        raw, V, I = lerTensaoCorrente(ag)
        V, I = aplicaCorrecoes(V, I)
        # plotting this way is not a good idea
        #pltTensaoCorrente(V, I)
plt.subplot(211)
plt.plot(np.arange(0, len(V)), V)
plt.subplot(212)
plt.plot(np.arange(0, len(I)), I)
salvaTensaoTXT(local, rotulo, contagem-quantidade+1, V)
salvaCorrenteTXT(local, rotulo, contagem-quantidade+1, I)
print "Individual = ", time()-tempoIndividual
quantidade -=1
total = time()-tempoInicio
    print 'Completed in [s]: ', total
plt.show()
return 0
def salvaTensaoTXT(local, rotulo, posicao, V):
"""
    Saves the voltage vector to a file whose name is formatted for it.
"""
nomeCompleto = local+"\\"+rotulo+"V"+str(posicao)+".txt"
return salvaTXT(nomeCompleto, V)
def salvaCorrenteTXT(local, rotulo, posicao, I):
"""
    Saves the current vector to a file whose name is formatted for it.
"""
nomeCompleto = local+"\\"+rotulo+"I"+str(posicao)+".txt"
return salvaTXT(nomeCompleto, I)
def salvaTXT(caminhoCompleto, vetor):
"""
    Saves the values of a vector to a text file, one value per line.
    """
    try:
        arquivo = open(caminhoCompleto, 'w')
    except:
        print 'error: could not write to the file'
        print '     : ', caminhoCompleto
        return -1
    #for i in range(len(vetor)):
    #    string = "%d %f\n" % (i, float(vetor[i]))
    #    arquivo.write(string)
    for i in vetor:
        arquivo.write("%f\n" % float(i))
    arquivo.close()
    # write finished successfully
return 0
def buscaAgilent():
"""
    Finds the instrument connected to the computer's USB port and returns
    the object used by the other functions to control the Agilent
    acquisition board.
"""
    listaInstrumentos = visa.get_instruments_list() # get the list of instruments connected to the computer
    listaAgilent = listaInstrumentos[0] # take the first instrument
    print 'Instrument list:'
    print listaAgilent # the instrument is expected to be the Agilent one
    ag = visa.instrument(listaAgilent) # create the object handed to the other functions
identificacao = ag.ask("*IDN?")
print identificacao
return ag
###############################################################################
# MAIN #
###############################################################################
if __name__ == '__main__':
    print 'Agilent U2531A'
ag = buscaAgilent()
##############################
    # read a single channel     #
##############################
#lerCanal103(ag)
#lerTensao(ag)
#lerCorrente(ag)
##########################
    # read two channels      #
##########################
raw, V, I = lerTensaoCorrente(ag)
V, I = aplicaCorrecoes(V, I)
pltTensaoCorrente(V, I)
    #############################
    # sequential acquisitions   #
    #############################
    # 60 acquisitions
    # save location: "C:\Temp"
#sequenciaAquisicoes(ag, 10)
| apache-2.0 |
xyguo/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 53 | 2668 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
                             scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
cgarrard/osgeopy-code | Chapter13/listing13_4.py | 1 | 1939 | # Script to draw world countries as patches.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from osgeo import ogr
def order_coords(coords, clockwise):
"""Orders coordinates."""
total = 0
x1, y1 = coords[0]
for x, y in coords[1:]:
total += (x - x1) * (y + y1)
x1, y1 = x, y
x, y = coords[0]
total += (x - x1) * (y + y1)
is_clockwise = total > 0
if clockwise != is_clockwise:
coords.reverse()
return coords
def make_codes(n):
"""Makes a list of path codes."""
codes = [Path.LINETO] * n
codes[0] = Path.MOVETO
return codes
def plot_polygon_patch(poly, color):
"""Plots a polygon as a patch."""
# Outer clockwise path.
coords = poly.GetGeometryRef(0).GetPoints()
coords = order_coords(coords, True)
codes = make_codes(len(coords))
for i in range(1, poly.GetGeometryCount()):
# Inner counter-clockwise paths.
coords2 = poly.GetGeometryRef(i).GetPoints()
coords2 = order_coords(coords2, False)
codes2 = make_codes(len(coords2))
# Concatenate the paths.
coords = np.concatenate((coords, coords2))
codes = np.concatenate((codes, codes2))
# Add the patch to the plot
path = Path(coords, codes)
patch = patches.PathPatch(path, facecolor=color)
plt.axes().add_patch(patch)
# Loop through all of the features in the countries layer and create
# patches for the polygons.
ds = ogr.Open(r'D:\osgeopy-data\global\ne_110m_admin_0_countries.shp')
lyr = ds.GetLayer(0)
for row in lyr:
geom = row.geometry()
if geom.GetGeometryType() == ogr.wkbPolygon:
plot_polygon_patch(geom, 'yellow')
elif geom.GetGeometryType() == ogr.wkbMultiPolygon:
for i in range(geom.GetGeometryCount()):
plot_polygon_patch(geom.GetGeometryRef(i), 'yellow')
plt.axis('equal')
plt.show()
| mit |
tillahoffmann/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
arthurmensch/modl | benchmarks/log.py | 1 | 2179 | import time
import numpy as np
from lightning.impl.primal_cd import CDClassifier
from lightning.impl.sag import SAGAClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from lightning.classification import SAGClassifier
from sklearn.linear_model import LogisticRegression
bunch = fetch_20newsgroups_vectorized(subset="all")
X = bunch.data
y = bunch.target
y[y >= 1] = 1
alpha = 1e-3
n_samples = X.shape[0]
sag = SAGClassifier(eta='auto',
loss='log',
alpha=alpha,
tol=1e-10,
max_iter=1000,
verbose=1,
random_state=0)
saga = SAGAClassifier(eta='auto',
loss='log',
alpha=alpha,
tol=1e-10,
max_iter=1000,
verbose=1,
random_state=0)
cd_classifier = CDClassifier(loss='log',
alpha=alpha / 2,
C=1 / n_samples,
tol=1e-10,
max_iter=100,
verbose=1,
random_state=0)
sklearn_sag = LogisticRegression(tol=1e-10, max_iter=1000,
verbose=2, random_state=0,
C=1. / (n_samples * alpha),
solver='sag',
penalty='l2',
fit_intercept=False)
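# Note: the three libraries parameterize the l2 penalty differently; the settings above
# appear intended to express the same effective regularization for a given alpha
# (lightning's SAG/SAGA take alpha directly, CDClassifier uses alpha/2 with C=1/n_samples,
# and scikit-learn's LogisticRegression uses C = 1/(n_samples * alpha)). Treat this as a
# reading aid rather than an exact equivalence proof.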
classifiers = [{'name': 'Lightning SAG', 'estimator': sag},
{'name': 'Lightning SAGA', 'estimator': saga},
{'name': 'Sklearn SAG', 'estimator': sklearn_sag},
{'name': 'Lightning CD', 'estimator': cd_classifier},
]
for classifier in classifiers:
    print(classifier['name'])
    clf = classifier['estimator']
    start = time.time()
    clf.fit(X, y)
    print("Training time", time.time() - start)
print("Accuracy", np.mean(clf.predict(X) == y))
n_nz = np.sum(np.sum(clf.coef_ != 0, axis=0, dtype=bool))
n_nz /= clf.coef_.size
print(clf.coef_)
print('Non-zero', n_nz)
| bsd-2-clause |
phev8/ward-metrics | wardmetrics/visualisations.py | 1 | 16641 | import matplotlib.pyplot as plt
def plot_events_with_segment_scores(segment_results, ground_truth_events, detected_events, use_datetime_x=False, show=True):
"""
    Plot ground-truth events, detected events and per-segment scores on a shared timeline.

    :param segment_results: list of per-segment tuples (start, end, gt_text, det_text, category, score),
        where category is one of "TP", "FP", "FN", "TN" and the remaining text fields are drawn on the plot
    :param ground_truth_events: list of (start, end) pairs for the annotated events
    :param detected_events: list of (start, end) pairs for the detected events
    :param use_datetime_x: if True, the x axis is meant to show datetimes (conversion is still a TODO below)
    :param show: call plt.show() (blocking) when True, otherwise only plt.draw()
    :return: None
    """
    fig = plt.figure(figsize=(10, 3))
# TODO: convert times to datetime if flag is set
# write y axis labels for ground truth and detections
plt.yticks([0.2, 0.5, 0.8], ["detections", "segment score", "actual events"])
plt.ylim([0, 1])
for d in detected_events:
plt.axvspan(d[0], d[1], 0, 0.5)
for gt in ground_truth_events:
plt.axvspan(gt[0], gt[1], 0.5, 1)
for s in segment_results:
color = "black"
index_of_cat = 4
if s[index_of_cat] == "TP":
color = "green"
elif s[index_of_cat] == "FP":
color = "red"
elif s[index_of_cat] == "FN":
color = "yellow"
elif s[index_of_cat] == "TN":
color = "blue"
# TODO: format text nicely
plt.text((s[1]+s[0])/2, 0.8, s[2], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.2, s[3], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.5, s[5], horizontalalignment='center', verticalalignment='center')
plt.axvspan(s[0], s[1], 0.4, 0.6, color=color)
plt.axvline(s[0], color="black")
plt.axvline(s[1], color="black")
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
fig = plt.figure(figsize=(10, 3))
for i in range(len(detected_events)):
d = detected_events[i]
plt.axvspan(d[0], d[1], 0, 0.5)
plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i], horizontalalignment='center', verticalalignment='center')
for i in range(len(ground_truth_events)):
gt = ground_truth_events[i]
plt.axvspan(gt[0], gt[1], 0.5, 1)
plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i], horizontalalignment='center', verticalalignment='center')
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
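# Both helpers above expect ground_truth_events and detected_events as lists of
# (start, end) pairs on a shared time axis, with gt_event_scores and
# detected_event_scores aligned index-by-index to the corresponding event lists.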
def plot_twoset_metrics(results, startangle=120):
fig1, axarr = plt.subplots(1, 2)
# plot positive rates:
labels_1 = ["tpr", "us", "ue", "fr", "dr"]
values_1 = [
results["tpr"],
results["us"],
results["ue"],
results["fr"],
results["dr"]
]
axarr[0].pie(values_1, labels=labels_1, autopct='%1.0f%%', startangle=startangle)
axarr[0].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
# plot negative rates:
labels_2 = ["1-fpr", "os", "oe", "mr", "ir"]
values_2 = [
1-results["fpr"],
results["os"],
results["oe"],
results["mr"],
results["ir"]
]
axarr[1].pie(values_2, labels=labels_2, autopct='%1.0f%%', startangle=startangle)
axarr[1].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
plt.show()
def plot_segment_counts(results):
# TODO: add title
labels = results.keys()
values = []
for label in labels:
values.append(results[label])
#explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
total = sum(values)
fig1, ax1 = plt.subplots()
ax1.pie(values, labels=labels, autopct=lambda p: '{:.0f}'.format(p * total / 100), startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
def plot_event_analysis_diagram(event_results, **kwargs):
""" Plot the event analysis diagram (EAD) for the given results
Visualisation of the distribution of specific error types either with the actual event count or
showing the percentage of the total events. Elements of the plot can be adjusted (like color, fontsize etc.)
Args:
event_results (dictionary): Dictionary containing event counts for "total_gt", "total_det", "D", "F", "FM", "M",
"C", "M'", "FM'", "F'", "I'" as returned by core_methods.event_metrics' third value
Keyword Arguments:
fontsize (int): Size of the text inside the bar plot (Reduce the value if some event types are too short)
        use_percentage (bool): whether to show percentage values instead of the actual event counts on the chart (default: False)
show (bool): whether to call plt.show (blocking) or plt.draw() for later displaying (default: True)
color_deletion: any matplotlib color for deletion events
color_fragmented: any matplotlib color for fragmented ground truth events
color_fragmented_merged: any matplotlib color for merged and fragmented ground truth events
color_merged: any matplotlib color for merged ground truth events
color_correct: any matplotlib color for correct events
color_merging: any matplotlib color for merging detection events
color_merging_fragmenting: any matplotlib color for merging and fragmenting detection events
        color_fragmenting: any matplotlib color for fragmenting detection events
color_insertion: any matplotlib color for insertion events
Returns:
matplotlib Figure: matplotlib figure reference
"""
fig = plt.figure(figsize=(10, 2))
total = event_results["total_gt"] + event_results["total_det"] - event_results["C"]
# Layout settings:
y_min = 0.3
y_max = 0.7
width = 0.02
text_x_offset = 0
text_y_pos_1 = 0.55
text_y_pos_2 = 0.4
fontsize = kwargs.pop('fontsize', 10)
fontsize_extern = 12
use_percentage = kwargs.pop('use_percentage', False)
# Color settings:
cmap = plt.get_cmap("Paired")
color_deletion = kwargs.pop('color_deletion', cmap(4))
color_fragmented = kwargs.pop('color_fragmented', cmap(6))
color_fragmented_merged = kwargs.pop('color_fragmented_merged', cmap(0))
color_merged = kwargs.pop('color_merged', cmap(8))
color_correct = kwargs.pop('color_correct', cmap(3))
color_merging = kwargs.pop('color_merging', cmap(9))
color_merging_fragmenting = kwargs.pop('color_merging_fragmenting', cmap(1))
color_fragmenting = kwargs.pop('color_fragmenting', cmap(7))
color_insertion = kwargs.pop('color_insertion', cmap(5))
# Show deletions:
current_score = "D"
current_x_start = 0
current_x_end = event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_deletion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score]*100/event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented events:
current_score = "F"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented and merged events:
current_score = "FM"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merged events:
current_score = "M"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show correct events:
current_score = "C"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_correct)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%/" + "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merging detections:
current_score = "M'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting and merging detections:
current_score = "FM'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting detections:
current_score = "F'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show insertions:
current_score = "I'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_insertion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Draw line for total events:
plt.axvspan(0, event_results["total_gt"], y_max, y_max + width, color="black")
plt.axvspan( total - event_results["total_det"], total, y_min, y_min - width, color="black")
plt.text((0 + event_results["total_gt"]) / 2, 0.8, "Actual events (total=" + str(event_results["total_gt"]) + ")",
fontsize=fontsize_extern, horizontalalignment='center', verticalalignment='center')
plt.text((2*total - event_results["total_det"]) / 2, 0.18, "Detected events (total=" + str(event_results["total_det"]) + ")",
horizontalalignment='center', fontsize=fontsize_extern, verticalalignment='center')
plt.tight_layout()
if kwargs.pop('show', True):
plt.show()
else:
plt.draw()
return fig
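# Illustrative usage sketch (hypothetical, not part of the original library): the event
# counts below are invented; they are only chosen so that the two bars line up, i.e.
# total_gt = D + F + FM + M + C and total_det = C + M' + FM' + F' + I'.
def _example_plot_event_analysis_diagram():
    event_results = {
        "total_gt": 10, "total_det": 9,
        "D": 1, "F": 2, "FM": 1, "M": 1, "C": 5,
        "M'": 1, "FM'": 1, "F'": 1, "I'": 1,
    }
    return plot_event_analysis_diagram(event_results, use_percentage=True, fontsize=8)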
| mit |
cloud9ers/gurumate | environment/share/doc/ipython/examples/parallel/options/mcpricer.py | 2 | 3552 | # <nbformat>2</nbformat>
# <markdowncell>
# # Parallel Monto-Carlo options pricing
# <markdowncell>
# ## Problem setup
# <codecell>
from __future__ import print_function
import sys
import time
from IPython.parallel import Client
import numpy as np
from mckernel import price_options
from matplotlib import pyplot as plt
# <codecell>
cluster_profile = "default"
price = 100.0 # Initial price
rate = 0.05 # Interest rate
days = 260 # Days to expiration
paths = 10000 # Number of MC paths
n_strikes = 6 # Number of strike values
min_strike = 90.0 # Min strike price
max_strike = 110.0 # Max strike price
n_sigmas = 5 # Number of volatility values
min_sigma = 0.1 # Min volatility
max_sigma = 0.4 # Max volatility
# <codecell>
strike_vals = np.linspace(min_strike, max_strike, n_strikes)
sigma_vals = np.linspace(min_sigma, max_sigma, n_sigmas)
# <markdowncell>
# ## Parallel computation across strike prices and volatilities
# <markdowncell>
# The Client is used to set up the calculation and works with all engines.
# <codecell>
c = Client(profile=cluster_profile)
# <markdowncell>
# A LoadBalancedView is an interface to the engines that provides dynamic load
# balancing at the expense of not knowing which engine will execute the code.
# <codecell>
view = c.load_balanced_view()
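# Note (added, hedged): a load-balanced view schedules each submitted call on whichever
# engine is currently free; apply_async(...) below returns an AsyncResult, and
# c.wait(...) / ar.get() further down block until the corresponding task has finished.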
# <codecell>
print("Strike prices: ", strike_vals)
print("Volatilities: ", sigma_vals)
# <markdowncell>
# Submit tasks for each (strike, sigma) pair.
# <codecell>
t1 = time.time()
async_results = []
for strike in strike_vals:
for sigma in sigma_vals:
ar = view.apply_async(price_options, price, strike, sigma, rate, days, paths)
async_results.append(ar)
# <codecell>
print("Submitted tasks: ", len(async_results))
# <markdowncell>
# Block until all tasks are completed.
# <codecell>
c.wait(async_results)
t2 = time.time()
t = t2-t1
print("Parallel calculation completed, time = %s s" % t)
# <markdowncell>
# ## Process and visualize results
# <markdowncell>
# Get the results using the `get` method:
# <codecell>
results = [ar.get() for ar in async_results]
# <markdowncell>
# Assemble the result into a structured NumPy array.
# <codecell>
prices = np.empty(n_strikes*n_sigmas,
dtype=[('ecall',float),('eput',float),('acall',float),('aput',float)]
)
for i, price in enumerate(results):
prices[i] = tuple(price)
prices.shape = (n_strikes, n_sigmas)
# <markdowncell>
# Plot the value of the European call in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['ecall'])
plt.axis('tight')
plt.colorbar()
plt.title('European Call')
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <markdowncell>
# Plot the value of the Asian call in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['acall'])
plt.axis('tight')
plt.colorbar()
plt.title("Asian Call")
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <markdowncell>
# Plot the value of the European put in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['eput'])
plt.axis('tight')
plt.colorbar()
plt.title("European Put")
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <markdowncell>
# Plot the value of the Asian put in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['aput'])
plt.axis('tight')
plt.colorbar()
plt.title("Asian Put")
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <codecell>
plt.show()
| lgpl-3.0 |
mattilyra/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/tests/test_text.py | 2 | 6893 | from __future__ import print_function
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
import warnings
from nose.tools import with_setup
@image_comparison(baseline_images=['font_styles'])
def test_font_styles():
from matplotlib import _get_data_path
data_path = _get_data_path()
def find_matplotlib_font(**kw):
prop = FontProperties(**kw)
path = findfont(prop, directory=data_path)
return FontProperties(fname=path)
from matplotlib.font_manager import FontProperties, findfont
warnings.filterwarnings('ignore','findfont: Font family \[\'Foo\'\] '+ \
'not found. Falling back to .',
UserWarning,
module='matplotlib.font_manager')
fig = plt.figure()
ax = plt.subplot( 1, 1, 1 )
normalFont = find_matplotlib_font( family = "sans-serif",
style = "normal",
variant = "normal",
size = 14,
)
ax.annotate( "Normal Font", (0.1, 0.1), xycoords='axes fraction',
fontproperties = normalFont )
boldFont = find_matplotlib_font( family = "Foo",
style = "normal",
variant = "normal",
weight = "bold",
stretch = 500,
size = 14,
)
ax.annotate( "Bold Font", (0.1, 0.2), xycoords='axes fraction',
fontproperties = boldFont )
boldItemFont = find_matplotlib_font( family = "sans serif",
style = "italic",
variant = "normal",
weight = 750,
stretch = 500,
size = 14,
)
ax.annotate( "Bold Italic Font", (0.1, 0.3), xycoords='axes fraction',
fontproperties = boldItemFont )
lightFont = find_matplotlib_font( family = "sans-serif",
style = "normal",
variant = "normal",
weight = 200,
stretch = 500,
size = 14,
)
ax.annotate( "Light Font", (0.1, 0.4), xycoords='axes fraction',
fontproperties = lightFont )
condensedFont = find_matplotlib_font( family = "sans-serif",
style = "normal",
variant = "normal",
weight = 500,
stretch = 100,
size = 14,
)
ax.annotate( "Condensed Font", (0.1, 0.5), xycoords='axes fraction',
fontproperties = condensedFont )
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['multiline'])
def test_multiline():
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title("multiline\ntext alignment")
plt.text(0.2, 0.5, "TpTpTp\n$M$\nTpTpTp", size=20,
ha="center", va="top")
plt.text(0.5, 0.5, "TpTpTp\n$M^{M^{M^{M}}}$\nTpTpTp", size=20,
ha="center", va="top")
plt.text(0.8, 0.5, "TpTpTp\n$M_{q_{q_{q}}}$\nTpTpTp", size=20,
ha="center", va="top")
plt.xlim(0, 1)
plt.ylim(0, 0.8)
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['antialiased'], extensions=['png'])
def test_antialiasing():
matplotlib.rcParams['text.antialiased'] = True
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.75, "antialiased", horizontalalignment='center',
verticalalignment='center')
fig.text(0.5, 0.25, "$\sqrt{x}$", horizontalalignment='center',
verticalalignment='center')
# NOTE: We don't need to restore the rcParams here, because the
# test cleanup will do it for us. In fact, if we do it here, it
# will turn antialiasing back off before the images are actually
# rendered.
def test_afm_kerning():
from matplotlib.afm import AFM
from matplotlib.font_manager import findfont
fn = findfont("Helvetica", fontext="afm")
with open(fn, 'rb') as fh:
afm = AFM(fh)
assert afm.string_width_height('VAVAVAVAVAVA') == (7174.0, 718)
@image_comparison(baseline_images=['text_contains'], extensions=['png'])
def test_contains():
import matplotlib.backend_bases as mbackend
fig = plt.figure()
ax = plt.axes()
mevent = mbackend.MouseEvent('button_press_event', fig.canvas, 0.5,
0.5, 1, None)
xs = np.linspace(0.25, 0.75, 30)
ys = np.linspace(0.25, 0.75, 30)
xs, ys = np.meshgrid(xs, ys)
txt = plt.text(0.48, 0.52, 'hello world', ha='center', fontsize=30,
rotation=30)
# uncomment to draw the text's bounding box
# txt.set_bbox(dict(edgecolor='black', facecolor='none'))
# draw the text. This is important, as the contains method can only work
# when a renderer exists.
plt.draw()
for x, y in zip(xs.flat, ys.flat):
mevent.x, mevent.y = plt.gca().transAxes.transform_point([x, y])
contains, _ = txt.contains(mevent)
color = 'yellow' if contains else 'red'
# capture the viewLim, plot a point, and reset the viewLim
vl = ax.viewLim.frozen()
ax.plot(x, y, 'o', color=color)
ax.viewLim.set(vl)
@image_comparison(baseline_images=['titles'])
def test_titles():
# left and right side titles
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title("left title", loc="left")
ax.set_title("right title", loc="right")
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['text_alignment'])
def test_alignment():
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
x = 0.1
for rotation in (0, 30):
for alignment in ('top', 'bottom', 'baseline', 'center'):
ax.text(x, 0.5, alignment + " Tj", va=alignment, rotation=rotation,
bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
ax.text(x, 1.0, r'$\sum_{i=0}^{j}$', va=alignment, rotation=rotation)
x += 0.1
ax.plot([0, 1], [0.5, 0.5])
ax.plot([0, 1], [1.0, 1.0])
ax.set_xlim([0, 1])
ax.set_ylim([0, 1.5])
ax.set_xticks([])
ax.set_yticks([])
| unlicense |
spectralDNS/shenfun | docs/paper/CG/CGpaper_dirichlet.py | 1 | 8842 | """
This script has been used to compute the Dirichlet results of the paper
Efficient spectral-Galerkin methods for second-order equations using different Chebyshev bases
The results have been computed using Python 3.9 and Shenfun 3.1.1.
The generalized Chebyshev-Tau results are computed with dedalus
and are therefore not part of this script.
"""
import sympy as sp
import numpy as np
import scipy.sparse.linalg as lin
import array_to_latex as a2l
from time import time
x = sp.Symbol('x', real=True)
fe = {}
rnd = {}
func = {}
def matvec(u_hat, f_hat, A, B, alpha, method):
"""Compute matrix vector product
Parameters
----------
u_hat : Function
The solution array
f_hat : Function
The right hand side array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
if method == 1:
if alpha == 0:
A.scale *= -1
f_hat = A.matvec(u_hat, f_hat)
A.scale *= -1
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
f_hat = sol.matvec(u_hat, f_hat)
else:
if alpha == 0:
A.scale *= -1
f_hat = A.matvec(u_hat, f_hat)
A.scale *= -1
else:
M = alpha*B - A
f_hat = M.matvec(u_hat, f_hat)
return f_hat
def get_solver(A, B, alpha, method):
"""Return optimal solver for given method
Parameters
----------
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
if method == 2:
if alpha == 0:
sol = la.TDMA(A*(-1))
else:
sol = la.PDMA(alpha*B - A)
elif method == 1:
if alpha == 0:
A.scale = -1
sol = chebyshev.la.ADD_Solve(A)
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
elif method in (0, 3, 4):
if alpha == 0:
sol = chebyshev.la.TwoDMA(A*(-1))
else:
sol = chebyshev.la.FDMA(alpha*B-A)
elif method == 5:
if alpha == 0:
AA = A*(-1)
sol = AA.solve
else:
sol = la.TDMA(alpha*B-A)
else:
raise NotImplementedError
return sol
def solve(f_hat, u_hat, A, B, alpha, method):
"""Solve (alpha*B-A)u_hat = f_hat
Parameters
----------
f_hat : Function
The right hand side array
u_hat : Function
The solution array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import extract_bc_matrices, Function
if isinstance(B, list):
u_hat.set_boundary_dofs()
bc_mat = extract_bc_matrices([B])
B = B[0]
w0 = Function(u_hat.function_space())
f_hat -= alpha*bc_mat[0].matvec(u_hat, w0)
sol = get_solver(A, B, alpha, method)
if method == 1 and alpha != 0:
u_hat = sol(u_hat, f_hat)
else:
u_hat = sol(f_hat, u_hat)
return u_hat
def main(N, method=0, alpha=0, returntype=0):
from shenfun import FunctionSpace, TrialFunction, TestFunction, \
inner, div, grad, chebyshev, SparseMatrix, Function, Array
global fe
basis = {0: ('ShenDirichlet', 'Heinrichs'),
1: ('ShenDirichlet', 'ShenDirichlet'),
2: ('Heinrichs', 'Heinrichs'),
3: ('DirichletU', 'ShenDirichlet'),
4: ('Orthogonal', 'ShenDirichlet'), # Quasi-Galerkin
5: ('ShenDirichlet', 'ShenDirichlet'), # Legendre
}
test, trial = basis[method]
if returntype == 2:
ue = sp.sin(100*sp.pi*x)
family = 'C' if method < 5 else 'L'
kw = {}
scaled = True if method in (0, 5) else False
if scaled:
kw['scaled'] = True
ST = FunctionSpace(N, family, basis=test, **kw)
TS = FunctionSpace(N, family, basis=trial, **kw)
wt = {0: 1, 1: 1, 2: 1, 3: 1-x**2, 4: 1, 5: 1}[method]
u = TrialFunction(TS)
v = TestFunction(ST)
A = inner(v*wt, div(grad(u)))
B = inner(v*wt, u)
if method == 4:
# Quasi
Q2 = chebyshev.quasi.QIGmat(N)
A = Q2*A
B = Q2*B
if method == 3:
k = np.arange(N-2)
K = SparseMatrix({0: 1/((k+1)*(k+2)*2)}, (N-2, N-2))
A[0] *= K[0]
A[2] *= K[0][:-2]
B[-2] *= K[0][2:]
B[0] *= K[0]
B[2] *= K[0][:-2]
B[4] *= K[0][:-4]
if returntype == 0:
M = alpha*B.diags()-A.diags()
con = np.linalg.cond(M.toarray())
elif returntype == 1:
# Use rnd to get the same random numbers for all methods
buf = rnd.get(N, np.random.random(N))
if not N in rnd:
rnd[N] = buf
v = Function(TS, buffer=buf)
v[-2:] = 0
u_hat = Function(TS)
f_hat = Function(TS)
f_hat = matvec(v, f_hat, A, B, alpha, method)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
con = np.abs(u_hat-v).max()
elif returntype == 2:
fe = alpha*ue - ue.diff(x, 2)
f_hat = Function(ST)
fj = Array(ST, buffer=fe)
if wt != 1:
fj *= np.sin((np.arange(N)+0.5)*np.pi/N)**2
f_hat = ST.scalar_product(fj, f_hat, fast_transform=True)
if method == 4:
f_hat[:-2] = Q2.diags('csc')*f_hat
if method == 3:
f_hat[:-2] *= K[0]
sol = get_solver(A, B, alpha, method)
u_hat = Function(TS)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
uj = Array(TS)
uj = TS.backward(u_hat, uj, fast_transform=True)
ua = Array(TS, buffer=ue)
con = np.sqrt(inner(1, (uj-ua)**2))
return con
if __name__ == '__main__':
import matplotlib.pyplot as plt
import argparse
import os
import sys
parser = argparse.ArgumentParser(description='Solve the Helmholtz problem with Dirichlet boundary conditions')
parser.add_argument('--return_type', action='store', type=int, required=True)
parser.add_argument('--include_legendre', action='store_true')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--plot', action='store_true')
parser.add_argument('--numba', action='store_true')
args = parser.parse_args()
if args.numba:
try:
import numba
os.environ['SHENFUN_OPTIMIZATION'] = 'NUMBA'
except ModuleNotFoundError:
            sys.stderr.write('Numba not found - using Cython\n')
cond = []
if args.return_type == 2:
N = (2**4,2**6, 2**8, 2**12, 2**16, 2**20)
elif args.return_type == 1:
N = (2**4, 2**12, 2**20)
else:
N = (32, 64, 128, 256, 512, 1024, 2048)
M = 6 if args.include_legendre else 5
alphas = (0, 1000)
if args.return_type in (0, 2):
for alpha in alphas:
cond.append([])
if args.verbose > 0:
print('alpha =', alpha)
for basis in range(M): # To include Legendre use --include_legendre (takes hours for N=2**20)
if args.verbose > 1:
print('Method =', basis)
cond[-1].append([])
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1][-1].append(main(n, basis, alpha, args.return_type))
linestyle = {0: 'solid', 1: 'dashed', 2: 'dotted'}
for i in range(len(cond)):
plt.loglog(N, cond[i][0], 'b',
N, cond[i][1], 'r',
N, cond[i][2], 'k',
N, cond[i][3], 'm',
N, cond[i][4], 'y',
linestyle=linestyle[i])
if args.include_legendre:
plt.loglog(N, cond[i][5], 'y', linestyle=linestyle[i])
a2l.to_ltx(np.array(cond)[i], frmt='{:6.2e}', print_out=True, mathform=False)
else:
for basis in range(M):
cond.append([])
if args.verbose > 1:
print('Method =', basis)
for alpha in alphas:
if args.verbose > 0:
print('alpha =', alpha)
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1].append(main(n, basis, alpha, args.return_type))
a2l.to_ltx(np.array(cond), frmt='{:6.2e}', print_out=True, mathform=False)
if args.plot:
plt.show()
| bsd-2-clause |
jpautom/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
kashif/scikit-learn | sklearn/model_selection/tests/test_search.py | 23 | 30837 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
tracierenea/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
robwarm/gpaw-symm | tools/niflheim-agts.py | 1 | 5426 | import os
import sys
import glob
import shutil
import subprocess
def cmd(c):
x = os.system(c)
assert x == 0, c
def fail(subject, email=None, filename='/dev/null', mailer='mail'):
assert mailer in ['mailx', 'mail', 'mutt']
import os
if email is not None:
if filename == '/dev/null':
assert os.system('mail -s "%s" %s < %s' %
(subject, email, filename)) == 0
else: # attachments
filenames = filename.split()
if mailer == 'mailx': # new mailx (12?)
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('echo | mail %s -s "%s" %s' %
(attach, subject, email)) == 0
elif mailer == 'mail': # old mailx (8?)
attach = '('
for f in filenames:
ext = os.path.splitext(f)[-1]
if ext:
flog = os.path.basename(f).replace(ext, '.log')
else:
flog = f
attach += 'uuencode %s %s&&' % (f, flog)
# remove final &&
attach = attach[:-2]
attach += ')'
assert os.system('%s | mail -s "%s" %s' %
(attach, subject, email)) == 0
else: # mutt
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('mutt %s -s "%s" %s < /dev/null' %
(attach, subject, email)) == 0
raise SystemExit
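# A hedged usage sketch for fail() above; the address and file names are
# hypothetical placeholders, not values used elsewhere in this script:
#
#     fail('AGTS GPAW: failed', email='user@example.com',
#          filename='status.log job-1.err', mailer='mutt')
#
# With several whitespace-separated names in ``filename`` the files are sent
# as attachments; with the default '/dev/null' only the subject is mailed.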
if '--dir' in sys.argv:
i = sys.argv.index('--dir')
dir = os.path.abspath(sys.argv[i+1])
else:
dir = 'agts'
if '--email' in sys.argv:
i = sys.argv.index('--email')
email = sys.argv[i+1]
else:
email = None
assert os.path.isdir(dir)
gpawdir = os.path.join(dir, 'gpaw')
# remove the old run directory
if os.path.isdir(dir):
shutil.rmtree(dir)
os.mkdir(dir)
os.chdir(dir)
cmd('svn checkout https://svn.fysik.dtu.dk/projects/gpaw/trunk gpaw')
# a version of gpaw is needed for imports from within this script!
cmd("\
cd " + gpawdir + "&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
python setup.py build_ext 2>&1 > build_ext.log")
# import gpaw from where it was installed
sys.path.insert(0, gpawdir)
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
module load openmpi/1.3.3-1.el5.fys.open64.4.2.3 && \
module load hdf5/1.8.6-5.el5.fys.open64.4.2.3.openmpi.1.3.3 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-xeon-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > thul.log' | ssh thul bash")
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-opteron-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > fjorm.log' | ssh fjorm bash")
cmd("""wget --no-check-certificate --quiet \
http://wiki.fysik.dtu.dk/gpaw-files/gpaw-setups-latest.tar.gz && \
tar xzf gpaw-setups-latest.tar.gz && \
rm gpaw-setups-latest.tar.gz && \
mv gpaw-setups-[0-9]* gpaw/gpaw-setups""")
cmd('svn export https://svn.fysik.dtu.dk/projects/ase/trunk ase')
# ase needed
sys.path.insert(0, '%s/ase' % dir)
from gpaw.test.big.agts import AGTSQueue
from gpaw.test.big.niflheim import NiflheimCluster
queue = AGTSQueue()
queue.collect()
cluster = NiflheimCluster(asepath=os.path.join(dir, 'ase'),
setuppath=os.path.join(gpawdir, 'gpaw-setups'))
# Example below is confusing: job.script must NOT be the *.agts.py script,
# but the actual python script to be run!
# testsuite.agts.py does both: see gpaw/test/big/miscellaneous/testsuite.agts.py
#queue.jobs = [job for job in queue.jobs if job.script == 'testsuite.agts.py']
nfailed = queue.run(cluster)
gfiles = os.path.join(dir, 'gpaw-files')
if not os.path.isdir(gfiles):
os.mkdir(gfiles)
queue.copy_created_files(gfiles)
# make files readable by go
files = glob.glob(gfiles + '/*')
for f in files:
os.chmod(f, 0644)
from gpaw.version import version
subject = 'AGTS GPAW %s: ' % str(version)
# Send mail:
sfile = os.path.join(dir, 'status.log')
attach = sfile
if not nfailed:
subject += ' succeeded'
fail(subject, email, attach, mailer='mutt')
else:
subject += ' failed'
# attach failed tests error files
ft = [l.split()[0] for l in open(sfile).readlines() if 'FAILED' in l]
for t in ft:
ef = glob.glob(os.path.join(dir, t) + '.e*')
for f in ef:
attach += ' ' + f
fail(subject, email, attach, mailer='mutt')
if 0:
# Analysis:
import matplotlib
matplotlib.use('Agg')
from gpaw.test.big.analysis import analyse
user = os.environ['USER']
analyse(queue,
'../analysis/analyse.pickle', # file keeping history
'../analysis', # Where to dump figures
rev=niflheim.revision,
#mailto='gpaw-developers@listserv.fysik.dtu.dk',
mailserver='servfys.fysik.dtu.dk',
attachment='status.log')
| gpl-3.0 |
rahul-c1/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
micahcochran/geopandas | geopandas/tools/tests/test_sjoin.py | 1 | 10287 | from __future__ import absolute_import
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from shapely.geometry import Point, Polygon
import geopandas
from geopandas import GeoDataFrame, GeoSeries, read_file, base
from geopandas import sjoin
import pytest
from pandas.util.testing import assert_frame_equal
pandas_0_18_problem = 'fails under pandas < 0.19 due to pandas issue 15692,'\
                      ' not a problem with sjoin.'
@pytest.fixture()
def dfs(request):
polys1 = GeoSeries(
[Polygon([(0, 0), (5, 0), (5, 5), (0, 5)]),
Polygon([(5, 5), (6, 5), (6, 6), (5, 6)]),
Polygon([(6, 0), (9, 0), (9, 3), (6, 3)])])
polys2 = GeoSeries(
[Polygon([(1, 1), (4, 1), (4, 4), (1, 4)]),
Polygon([(4, 4), (7, 4), (7, 7), (4, 7)]),
Polygon([(7, 7), (10, 7), (10, 10), (7, 10)])])
df1 = GeoDataFrame({'geometry': polys1, 'df1': [0, 1, 2]})
df2 = GeoDataFrame({'geometry': polys2, 'df2': [3, 4, 5]})
if request.param == 'string-index':
df1.index = ['a', 'b', 'c']
df2.index = ['d', 'e', 'f']
# construction expected frames
expected = {}
part1 = df1.copy().reset_index().rename(
columns={'index': 'index_left'})
part2 = df2.copy().iloc[[0, 1, 1, 2]].reset_index().rename(
columns={'index': 'index_right'})
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [0, 0, 1, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['intersects'] = exp.drop('_merge', axis=1).copy()
part1 = df1.copy().reset_index().rename(
columns={'index': 'index_left'})
part2 = df2.copy().reset_index().rename(
columns={'index': 'index_right'})
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [0, 3, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['contains'] = exp.drop('_merge', axis=1).copy()
part1['_merge'] = [0, 1, 2]
part2['_merge'] = [3, 1, 3]
exp = pd.merge(part1, part2, on='_merge', how='outer')
expected['within'] = exp.drop('_merge', axis=1).copy()
return [request.param, df1, df2, expected]
@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
class TestSpatialJoin:
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_inner(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='inner', op=op)
exp = expected[op].dropna().copy()
exp = exp.drop('geometry_y', axis=1).rename(
columns={'geometry_x': 'geometry'})
exp[['df1', 'df2']] = exp[['df1', 'df2']].astype('int64')
if index == 'default-index':
exp[['index_left', 'index_right']] = \
exp[['index_left', 'index_right']].astype('int64')
exp = exp.set_index('index_left')
exp.index.name = None
assert_frame_equal(res, exp)
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_left(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='left', op=op)
exp = expected[op].dropna(subset=['index_left']).copy()
exp = exp.drop('geometry_y', axis=1).rename(
columns={'geometry_x': 'geometry'})
exp['df1'] = exp['df1'].astype('int64')
if index == 'default-index':
exp['index_left'] = exp['index_left'].astype('int64')
# TODO: in result the dtype is object
res['index_right'] = res['index_right'].astype(float)
exp = exp.set_index('index_left')
exp.index.name = None
assert_frame_equal(res, exp)
@pytest.mark.parametrize('dfs', ['default-index', 'string-index'],
indirect=True)
@pytest.mark.parametrize('op', ['intersects', 'contains', 'within'])
def test_right(self, op, dfs):
index, df1, df2, expected = dfs
res = sjoin(df1, df2, how='right', op=op)
exp = expected[op].dropna(subset=['index_right']).copy()
exp = exp.drop('geometry_x', axis=1).rename(
columns={'geometry_y': 'geometry'})
exp['df2'] = exp['df2'].astype('int64')
if index == 'default-index':
exp['index_right'] = exp['index_right'].astype('int64')
res['index_left'] = res['index_left'].astype(float)
exp = exp.set_index('index_right')
exp = exp.reindex(columns=res.columns)
assert_frame_equal(res, exp, check_index_type=False)
@pytest.mark.skipif(not base.HAS_SINDEX, reason='Rtree absent, skipping')
class TestSpatialJoinNYBB:
def setup_method(self):
nybb_filename = geopandas.datasets.get_path('nybb')
self.polydf = read_file(nybb_filename)
self.crs = self.polydf.crs
N = 20
b = [int(x) for x in self.polydf.total_bounds]
self.pointdf = GeoDataFrame(
[{'geometry': Point(x, y),
'pointattr1': x + y, 'pointattr2': x - y}
for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
range(b[1], b[3], int((b[3]-b[1])/N)))],
crs=self.crs)
def test_geometry_name(self):
# test sjoin is working with other geometry name
polydf_original_geom_name = self.polydf.geometry.name
self.polydf = (self.polydf.rename(columns={'geometry': 'new_geom'})
.set_geometry('new_geom'))
assert polydf_original_geom_name != self.polydf.geometry.name
res = sjoin(self.polydf, self.pointdf, how="left")
assert self.polydf.geometry.name == res.geometry.name
def test_sjoin_left(self):
df = sjoin(self.pointdf, self.polydf, how='left')
assert df.shape == (21, 8)
for i, row in df.iterrows():
assert row.geometry.type == 'Point'
assert 'pointattr1' in df.columns
assert 'BoroCode' in df.columns
def test_sjoin_right(self):
# the inverse of left
df = sjoin(self.pointdf, self.polydf, how="right")
df2 = sjoin(self.polydf, self.pointdf, how="left")
assert df.shape == (12, 8)
assert df.shape == df2.shape
for i, row in df.iterrows():
assert row.geometry.type == 'MultiPolygon'
for i, row in df2.iterrows():
assert row.geometry.type == 'MultiPolygon'
def test_sjoin_inner(self):
df = sjoin(self.pointdf, self.polydf, how="inner")
assert df.shape == (11, 8)
def test_sjoin_op(self):
# points within polygons
df = sjoin(self.pointdf, self.polydf, how="left", op="within")
assert df.shape == (21, 8)
assert df.ix[1]['BoroName'] == 'Staten Island'
# points contain polygons? never happens so we should have nulls
df = sjoin(self.pointdf, self.polydf, how="left", op="contains")
assert df.shape == (21, 8)
assert np.isnan(df.ix[1]['Shape_Area'])
def test_sjoin_bad_op(self):
# AttributeError: 'Point' object has no attribute 'spandex'
with pytest.raises(ValueError):
sjoin(self.pointdf, self.polydf, how="left", op="spandex")
def test_sjoin_duplicate_column_name(self):
pointdf2 = self.pointdf.rename(columns={'pointattr1': 'Shape_Area'})
df = sjoin(pointdf2, self.polydf, how="left")
assert 'Shape_Area_left' in df.columns
assert 'Shape_Area_right' in df.columns
def test_sjoin_values(self):
# GH190
self.polydf.index = [1, 3, 4, 5, 6]
df = sjoin(self.pointdf, self.polydf, how='left')
assert df.shape == (21, 8)
df = sjoin(self.polydf, self.pointdf, how='left')
assert df.shape == (12, 8)
@pytest.mark.skipif(str(pd.__version__) < LooseVersion('0.19'),
reason=pandas_0_18_problem)
@pytest.mark.xfail
def test_no_overlapping_geometry(self):
# Note: these tests are for correctly returning GeoDataFrame
# when result of the join is empty
df_inner = sjoin(self.pointdf.iloc[17:], self.polydf, how='inner')
df_left = sjoin(self.pointdf.iloc[17:], self.polydf, how='left')
df_right = sjoin(self.pointdf.iloc[17:], self.polydf, how='right')
# Recent Pandas development has introduced a new way of handling merges
        # this change has altered the output when there are no overlapping
        # geometries
if str(pd.__version__) > LooseVersion('0.18.1'):
right_idxs = pd.Series(range(0, 5), name='index_right',
dtype='int64')
else:
right_idxs = pd.Series(name='index_right', dtype='int64')
expected_inner_df = pd.concat(
[self.pointdf.iloc[:0],
pd.Series(name='index_right', dtype='int64'),
self.polydf.drop('geometry', axis=1).iloc[:0]],
axis=1)
expected_inner = GeoDataFrame(
expected_inner_df, crs={'init': 'epsg:4326', 'no_defs': True})
expected_right_df = pd.concat(
[self.pointdf.drop('geometry', axis=1).iloc[:0],
pd.concat([pd.Series(name='index_left', dtype='int64'),
right_idxs],
axis=1),
self.polydf],
axis=1)
expected_right = GeoDataFrame(
expected_right_df, crs={'init': 'epsg:4326', 'no_defs': True})\
.set_index('index_right')
expected_left_df = pd.concat(
[self.pointdf.iloc[17:],
pd.Series(name='index_right', dtype='int64'),
self.polydf.iloc[:0].drop('geometry', axis=1)],
axis=1)
expected_left = GeoDataFrame(
expected_left_df, crs={'init': 'epsg:4326', 'no_defs': True})
assert expected_inner.equals(df_inner)
assert expected_right.equals(df_right)
assert expected_left.equals(df_left)
@pytest.mark.skip("Not implemented")
def test_sjoin_outer(self):
df = sjoin(self.pointdf, self.polydf, how="outer")
assert df.shape == (21, 8)
| bsd-3-clause |
r-mart/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the ``n_samples`` parameter to a small
    value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
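# Hedged example for estimate_bandwidth (the data below is illustrative):
#
#     >>> import numpy as np
#     >>> X = np.random.randn(200, 3)
#     >>> bw = estimate_bandwidth(X, quantile=0.3, n_samples=100, random_state=0)
#     >>> bw > 0
#     True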
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
        The number of jobs to use for the computation. This works by running
        the hill-climbing search from each seed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
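# Hedged example for the functional interface above (illustrative data):
#
#     >>> import numpy as np
#     >>> X = np.vstack([np.random.randn(30, 2), np.random.randn(30, 2) + 5])
#     >>> centers, labels = mean_shift(X, bandwidth=1.5, bin_seeding=True)
#     >>> labels.shape == (X.shape[0],)
#     True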
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
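# Hedged example for get_bin_seeds (illustrative data; the number of seeds
# depends on bin_size relative to the spread of the points):
#
#     >>> import numpy as np
#     >>> X = np.array([[0., 0.], [0.1, 0.1], [5., 5.]])
#     >>> seeds = get_bin_seeds(X, bin_size=1., min_bin_freq=1)
#     >>> len(seeds)      # the two points near the origin share a bin
#     2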
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
        The number of jobs to use for the computation. This works by running
        the hill-climbing search from each seed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity tends
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
markelg/xray | xray/test/test_formatting.py | 6 | 3865 | import numpy as np
import pandas as pd
from xray.core import formatting
from xray.core.pycompat import PY3
from . import TestCase
class TestFormatting(TestCase):
def test_get_indexer_at_least_n_items(self):
cases = [
((20,), (slice(10),)),
((3, 20,), (0, slice(10))),
((2, 10,), (0, slice(10))),
((2, 5,), (slice(2), slice(None))),
((1, 2, 5,), (0, slice(2), slice(None))),
((2, 3, 5,), (0, slice(2), slice(None))),
((1, 10, 1,), (0, slice(10), slice(None))),
((2, 5, 1,), (slice(2), slice(None), slice(None))),
((2, 5, 3,), (0, slice(4), slice(None))),
((2, 3, 3,), (slice(2), slice(None), slice(None))),
]
for shape, expected in cases:
actual = formatting._get_indexer_at_least_n_items(shape, 10)
self.assertEqual(expected, actual)
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
self.assertItemsEqual(expected, actual)
with self.assertRaisesRegexp(ValueError, 'at least one item'):
formatting.first_n_items(array, 0)
def test_format_item(self):
cases = [
(pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),
(pd.Timestamp('2000-01-01'), '2000-01-01'),
(pd.Timestamp('NaT'), 'NaT'),
(pd.Timedelta('10 days 1 hour'), '10 days 01:00:00'),
(pd.Timedelta('-3 days'), '-3 days +00:00:00'),
(pd.Timedelta('3 hours'), '0 days 03:00:00'),
(pd.Timedelta('NaT'), 'NaT'),
('foo', "'foo'"),
(u'foo', "'foo'" if PY3 else "u'foo'"),
(b'foo', "b'foo'" if PY3 else "'foo'"),
(1, '1'),
(1.0, '1.0'),
]
for item, expected in cases:
actual = formatting.format_item(item)
self.assertEqual(expected, actual)
def test_format_items(self):
cases = [
(np.arange(4) * np.timedelta64(1, 'D'),
'0 days 1 days 2 days 3 days'),
(np.arange(4) * np.timedelta64(3, 'h'),
'00:00:00 03:00:00 06:00:00 09:00:00'),
(np.arange(4) * np.timedelta64(500, 'ms'),
'00:00:00 00:00:00.500000 00:00:01 00:00:01.500000'),
(pd.to_timedelta(['NaT', '0s', '1s', 'NaT']),
'NaT 00:00:00 00:00:01 NaT'),
(pd.to_timedelta(['1 day 1 hour', '1 day', '0 hours']),
'1 days 01:00:00 1 days 00:00:00 0 days 00:00:00'),
([1, 2, 3], '1 2 3'),
]
for item, expected in cases:
actual = ' '.join(formatting.format_items(item))
self.assertEqual(expected, actual)
def test_format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 13)
expected = '0 1 2 3 4 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(100.0), 11)
expected = '0.0 1.0 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(100.0), 1)
expected = '0.0 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(3), 5)
expected = '0 1 2'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(4.0), 11)
expected = '0.0 1.0 ...'
self.assertEqual(expected, actual)
actual = formatting.format_array_flat(np.arange(4), 0)
expected = '0 ...'
self.assertEqual(expected, actual)
def test_pretty_print(self):
self.assertEqual(formatting.pretty_print('abcdefghij', 8), 'abcde...')
| apache-2.0 |
krikru/tensorflow-opencl | tensorflow/examples/learn/wide_n_deep_tutorial.py | 24 | 8941 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from six.moves import urllib
import pandas as pd
import tensorflow as tf
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s" % test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
# Sparse base columns.
gender = tf.contrib.layers.sparse_column_with_keys(column_name="gender",
keys=["female", "male"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
# Transformations.
age_buckets = tf.contrib.layers.bucketized_column(age,
boundaries=[
18, 25, 30, 35, 40, 45,
50, 55, 60, 65
])
# Wide columns and deep columns.
wide_columns = [gender, native_country, education, occupation, workclass,
relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column(
[age_buckets, education, occupation],
hash_bucket_size=int(1e6)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country,
dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
if model_type == "wide":
m = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
feature_columns=wide_columns)
elif model_type == "deep":
m = tf.contrib.learn.DNNClassifier(model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(df):
"""Input builder function."""
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols)
feature_cols.update(categorical_cols)
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
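# Hedged sketch of how the helpers above fit together; it mirrors the calls
# made in train_and_eval() below (``df_train`` stands for the pre-processed
# training DataFrame):
#
#     m = build_estimator(tempfile.mkdtemp(), "wide_n_deep")
#     m.fit(input_fn=lambda: input_fn(df_train), steps=200)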
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
df_train = pd.read_csv(
tf.gfile.Open(train_file_name),
names=COLUMNS,
skipinitialspace=True,
engine="python")
df_test = pd.read_csv(
tf.gfile.Open(test_file_name),
names=COLUMNS,
skipinitialspace=True,
skiprows=1,
engine="python")
# remove NaN elements
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
print("model directory = %s" % model_dir)
m = build_estimator(model_dir, model_type)
m.fit(input_fn=lambda: input_fn(df_train), steps=train_steps)
results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=200,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
hmendozap/auto-sklearn | test/util/test_common.py | 1 | 1264 | # -*- encoding: utf-8 -*-
from __future__ import print_function
from functools import partial
import os
import unittest
from autosklearn.util import set_auto_seed, get_auto_seed, del_auto_seed, \
check_pid
class TestUtilsCommon(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.env_key = 'AUTOSKLEARN_SEED'
def test_auto_seed(self):
value = 123
set_auto_seed(value)
self.assertEqual(os.environ[self.env_key], str(value))
del_auto_seed()
self.assertEqual(os.environ.get(self.env_key), None)
def test_get_auto_seed(self):
del_auto_seed()
self.assertRaises(AssertionError, get_auto_seed)
set_auto_seed([])
self.assertRaises(ValueError, get_auto_seed)
self.assertRaises(ValueError, partial(set_auto_seed, 5))
del_auto_seed()
set_auto_seed(5)
self.assertEqual(os.environ.get(self.env_key), str(5))
def test_check_pid(self):
our_pid = os.getpid()
exists = check_pid(our_pid)
self.assertTrue(exists)
our_pid = -11000 # We hope this pid does not exist
exists = check_pid(our_pid)
self.assertFalse(exists)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
duncanmmacleod/gwpy | gwpy/plot/axes.py | 1 | 21895 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of `~matplotlib.axes.Axes` for gwpy
"""
import warnings
from functools import wraps
from math import log
from numbers import Number
import numpy
from astropy.time import Time
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
from matplotlib.axes import Axes as _Axes
from matplotlib.axes._base import _process_plot_var_args
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
from matplotlib.projections import register_projection
from . import (Plot, colorbar as gcbar)
from .colors import format_norm
from .gps import GPS_SCALES
from .legend import HandlerLine2D
from ..time import to_gps
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
def log_norm(func):
"""Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
"""
@wraps(func)
def decorated_func(*args, **kwargs):
norm, kwargs = format_norm(kwargs)
kwargs['norm'] = norm
return func(*args, **kwargs)
return decorated_func
def xlim_as_gps(func):
"""Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`
"""
@wraps(func)
def wrapped_func(self, left=None, right=None, **kw):
if right is None and numpy.iterable(left):
left, right = left
kw['left'] = left
kw['right'] = right
gpsscale = self.get_xscale() in GPS_SCALES
for key in ('left', 'right'):
if gpsscale:
try:
kw[key] = numpy.longdouble(str(to_gps(kw[key])))
except TypeError:
pass
return func(self, **kw)
return wrapped_func
def restore_grid(func):
"""Wrap ``func`` to preserve the Axes current grid settings.
"""
@wraps(func)
def wrapped_func(self, *args, **kwargs):
try:
grid = (
self.xaxis._minor_tick_kw["gridOn"],
self.xaxis._major_tick_kw["gridOn"],
self.yaxis._minor_tick_kw["gridOn"],
self.yaxis._major_tick_kw["gridOn"],
)
except KeyError: # matplotlib < 3.3.3
grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
try:
return func(self, *args, **kwargs)
finally:
# reset grid
self.xaxis.grid(grid[0], which="minor")
self.xaxis.grid(grid[1], which="major")
self.yaxis.grid(grid[2], which="minor")
self.yaxis.grid(grid[3], which="major")
return wrapped_func
# -- new Axes -----------------------------------------------------------------
class Axes(_Axes):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle Series in `ax.plot()`
self._get_lines = PlotArgsProcessor(self)
# reset data formatters (for interactive plots) to support
# GPS time display
self.fmt_xdata = self._fmt_xdata
self.fmt_ydata = self._fmt_ydata
@allow_rasterization
def draw(self, *args, **kwargs):
labels = {}
for ax in (self.xaxis, self.yaxis):
if ax.get_scale() in GPS_SCALES and ax.isDefault_label:
labels[ax] = ax.get_label_text()
trans = ax.get_transform()
epoch = float(trans.get_epoch())
unit = trans.get_unit_name()
iso = Time(epoch, format='gps', scale='utc').iso
utc = iso.rstrip('0').rstrip('.')
ax.set_label_text('Time [{0!s}] from {1!s} UTC ({2!r})'.format(
unit, utc, epoch))
try:
super().draw(*args, **kwargs)
finally:
for ax in labels: # reset labels
ax.isDefault_label = True
# -- auto-gps helpers -----------------------
def _fmt_xdata(self, x):
if self.get_xscale() in GPS_SCALES:
return str(to_gps(x))
return self.xaxis.get_major_formatter().format_data_short(x)
def _fmt_ydata(self, y):
if self.get_yscale() in GPS_SCALES:
return str(to_gps(y))
return self.yaxis.get_major_formatter().format_data_short(y)
set_xlim = xlim_as_gps(_Axes.set_xlim)
def set_epoch(self, epoch):
"""Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
"""
scale = self.get_xscale()
return self.set_xscale(scale, epoch=epoch)
def get_epoch(self):
"""Return the epoch for the current GPS scale/
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
"""
return self.get_xaxis().get_transform().get_epoch()
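    # Hedged usage sketch for the epoch helpers above; it assumes the
    # 'auto-gps' scale name registered in gwpy.plot.gps and that
    # gwpy.plot.Plot().gca() returns one of these Axes:
    #
    #     >>> from gwpy.plot import Plot
    #     >>> ax = Plot().gca()
    #     >>> ax.set_xscale('auto-gps')
    #     >>> ax.set_epoch(1126259462)    # an example GPS time
    #     >>> ax.get_epoch()              # returns the epoch as a GPS float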
# -- overloaded plotting methods ------------
def scatter(self, x, y, c=None, **kwargs):
# scatter with auto-sorting by colour
try:
if c is None:
raise ValueError
c_array = numpy.asanyarray(c, dtype=float)
except ValueError: # no colour array
pass
else:
c_sort = kwargs.pop('c_sort', True)
if c_sort:
sortidx = c_array.argsort()
x = numpy.asarray(x)[sortidx]
y = numpy.asarray(y)[sortidx]
c = numpy.asarray(c)[sortidx]
return super().scatter(x, y, c=c, **kwargs)
scatter.__doc__ = _Axes.scatter.__doc__.replace(
'marker :',
'c_sort : `bool`, optional, default: True\n'
' Sort scatter points by `c` array value, if given.\n\n'
'marker :',
)
@log_norm
def imshow(self, array, *args, **kwargs):
"""Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering
"""
if hasattr(array, "yspan"): # Array2D
return self._imshow_array2d(array, *args, **kwargs)
image = super().imshow(array, *args, **kwargs)
self.autoscale(enable=None, axis='both', tight=None)
return image
def _imshow_array2d(self, array, origin='lower', interpolation='none',
aspect='auto', **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.imshow`
"""
# NOTE: If you change the defaults for this method, please update
# the docstring for `imshow` above.
# calculate extent
extent = tuple(array.xspan) + tuple(array.yspan)
if self.get_xscale() == 'log' and extent[0] == 0.:
extent = (1e-300,) + extent[1:]
if self.get_yscale() == 'log' and extent[2] == 0.:
extent = extent[:2] + (1e-300,) + extent[3:]
kwargs.setdefault('extent', extent)
return self.imshow(array.value.T, origin=origin, aspect=aspect,
interpolation=interpolation, **kwargs)
@restore_grid
@log_norm
def pcolormesh(self, *args, **kwargs):
"""Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See also
--------
matplotlib.axes.Axes.pcolormesh
"""
if len(args) == 1 and hasattr(args[0], "yindex"): # Array2D
return self._pcolormesh_array2d(*args, **kwargs)
return super().pcolormesh(*args, **kwargs)
def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs)
def hist(self, x, *args, **kwargs):
x = numpy.asarray(x)
# re-format weights as array if given as float
weights = kwargs.get('weights', None)
if isinstance(weights, Number):
kwargs['weights'] = numpy.ones_like(x) * weights
# calculate log-spaced bins on-the-fly
if (kwargs.pop('logbins', False) and
not numpy.iterable(kwargs.get('bins', None))):
nbins = kwargs.get('bins', None) or rcParams.get('hist.bins', 30)
# get range
hrange = kwargs.pop('range', None)
if hrange is None:
try:
hrange = numpy.min(x), numpy.max(x)
except ValueError as exc:
if str(exc).startswith('zero-size array'): # no data
exc.args = ('cannot generate log-spaced histogram '
'bins for zero-size array, '
'please pass `bins` or `range` manually',)
raise
# log-scale the axis and extract the base
if kwargs.get('orientation') == 'horizontal':
self.set_yscale('log', nonposy='clip')
logbase = self.yaxis._scale.base
else:
self.set_xscale('log', nonposx='clip')
logbase = self.xaxis._scale.base
# generate the bins
kwargs['bins'] = numpy.logspace(
log(hrange[0], logbase), log(hrange[1], logbase),
nbins+1, endpoint=True)
return super().hist(x, *args, **kwargs)
hist.__doc__ = _Axes.hist.__doc__.replace(
'color :',
'logbins : boolean, optional\n'
' If ``True``, use logarithmically-spaced histogram bins.\n\n'
' Default is ``False``\n\n'
'color :')
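    # Hedged usage sketch for the ``logbins`` keyword added above (sample
    # data is illustrative; it assumes Plot().gca() returns these Axes):
    #
    #     >>> import numpy
    #     >>> from gwpy.plot import Plot
    #     >>> ax = Plot().gca()
    #     >>> counts, bins, patches = ax.hist(
    #     ...     numpy.random.lognormal(size=1000), logbins=True, bins=30)
    #     >>> len(bins)                   # nbins + 1 log-spaced edges
    #     31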
# -- new plotting methods -------------------
def plot_mmm(self, data, lower=None, upper=None, **kwargs):
"""Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
            :meth:`~matplotlib.axes.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
        - `~matplotlib.lines.Line2D` for ``data``,
- `~matplotlib.lines.Line2D` for ``lower``, if given
- `~matplotlib.lines.Line2D` for ``upper``, if given
        - `~matplotlib.collections.PolyCollection` for shading
See also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs``
"""
alpha = kwargs.pop('alpha', .1)
# plot mean
line, = self.plot(data, **kwargs)
out = [line]
# modify keywords for shading
kwargs.update({
'label': '',
'linewidth': line.get_linewidth() / 2,
'color': line.get_color(),
'alpha': alpha * 2,
})
# plot lower and upper Series
fill = [data.xindex.value, data.value, data.value]
for i, bound in enumerate((lower, upper)):
if bound is not None:
out.extend(self.plot(bound, **kwargs))
fill[i+1] = bound.value
# fill between
out.append(self.fill_between(
*fill, alpha=alpha, color=kwargs['color'],
rasterized=kwargs.get('rasterized', True)))
return out
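    # Hedged usage sketch for plot_mmm (the TimeSeries construction is
    # illustrative; any gwpy Series sharing the same index works):
    #
    #     >>> import numpy
    #     >>> from gwpy.timeseries import TimeSeries
    #     >>> from gwpy.plot import Plot
    #     >>> data = TimeSeries(numpy.sin(numpy.linspace(0, 10, 100)), dt=0.1)
    #     >>> ax = Plot().gca()
    #     >>> artists = ax.plot_mmm(data, lower=data - 0.5, upper=data + 0.5)
    #     >>> len(artists)                # line, lower, upper, and the fill
    #     4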
def tile(self, x, y, w, h, color=None,
anchor='center', edgecolors='face', linewidth=0.8,
**kwargs):
"""Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
"""
# get color and sort
if color is not None and kwargs.get('c_sort', True):
sortidx = color.argsort()
x = x[sortidx]
y = y[sortidx]
w = w[sortidx]
h = h[sortidx]
color = color[sortidx]
# define how to make a polygon for each tile
if anchor == 'll':
def _poly(x, y, w, h):
return ((x, y), (x, y+h), (x+w, y+h), (x+w, y))
elif anchor == 'lr':
def _poly(x, y, w, h):
return ((x-w, y), (x-w, y+h), (x, y+h), (x, y))
elif anchor == 'ul':
def _poly(x, y, w, h):
return ((x, y-h), (x, y), (x+w, y), (x+w, y-h))
elif anchor == 'ur':
def _poly(x, y, w, h):
return ((x-w, y-h), (x-w, y), (x, y), (x, y-h))
elif anchor == 'center':
def _poly(x, y, w, h):
return ((x-w/2., y-h/2.), (x-w/2., y+h/2.),
(x+w/2., y+h/2.), (x+w/2., y-h/2.))
else:
raise ValueError("Unrecognised tile anchor {!r}".format(anchor))
# build collection
cmap = kwargs.pop('cmap', rcParams['image.cmap'])
coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
edgecolors=edgecolors, linewidth=linewidth,
**kwargs)
if color is not None:
coll.set_array(color)
coll.set_cmap(cmap)
out = self.add_collection(coll)
self.autoscale_view()
return out
# -- overloaded auxiliary methods -----------
def legend(self, *args, **kwargs):
# handle deprecated keywords
linewidth = kwargs.pop("linewidth", None)
if linewidth:
warnings.warn(
"the linewidth keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"please update your code to use a custom legend handler, "
"e.g. gwpy.plot.legend.HandlerLine2D.",
DeprecationWarning,
)
alpha = kwargs.pop("alpha", None)
if alpha:
kwargs.setdefault("framealpha", alpha)
warnings.warn(
"the alpha keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"use framealpha instead.",
DeprecationWarning,
)
# build custom handler
handler_map = kwargs.setdefault("handler_map", dict())
if isinstance(handler_map, dict):
handler_map.setdefault(Line2D, HandlerLine2D(linewidth or 6))
# create legend
return super().legend(*args, **kwargs)
legend.__doc__ = _Axes.legend.__doc__.replace(
"Call signatures",
""".. note::
This method uses a custom default legend handler for
`~matplotlib.lines.Line2D` objects, with increased linewidth relative
to the upstream :meth:`~matplotlib.axes.Axes.legend` method.
To disable this, pass ``handler_map=None``, or create and pass your
own handler class. See :ref:`gwpy-plot-legend` for more details.
Call signatures""",
)
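    # For instance (illustrative), ``ax.legend(handler_map=None)`` restores
    # matplotlib's stock Line2D legend handler, while the default call uses
    # gwpy's HandlerLine2D with a thicker sample line.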
def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
            for the new axes; the default is ``0.`` when ``use_axesgrid=True``
            (itself the default), otherwise ``.15`` to match the upstream
            matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See also
--------
Plot.colorbar
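        Examples
        --------
        A minimal sketch (``data`` is an illustrative 2-D array):
        >>> layer = ax.imshow(data)
        >>> cbar = ax.colorbar(layer)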
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs)
# override default Axes with this one by registering a projection with the
# same name
register_projection(Axes)
# -- overload Axes.plot() to handle Series ------------------------------------
class PlotArgsProcessor(_process_plot_var_args):
"""This class controls how ax.plot() works
"""
def __call__(self, *args, **kwargs):
"""Find `Series` data in `plot()` args and unwrap
"""
newargs = []
while args:
# strip first argument
this, args = args[:1], args[1:]
            # if it's a 1-D Series, parse it as (xindex, value)
if hasattr(this[0], "xindex") and this[0].ndim == 1:
this = (this[0].xindex.value, this[0].value)
# otherwise treat as normal (must be a second argument)
else:
this += args[:1]
args = args[1:]
# allow colour specs
if args and isinstance(args[0], str):
this += args[0],
args = args[1:]
newargs.extend(this)
return super().__call__(*newargs, **kwargs)
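# What the unwrapping above buys (illustrative): for a 1-D gwpy Series ``ts``,
# ``ax.plot(ts)`` is expanded to ``ax.plot(ts.xindex.value, ts.value)``, so
# the X-axis automatically reflects the series' own sample index.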
| gpl-3.0 |
phronesis-mnemosyne/census-schema-alignment | wit/wit/dev/authorship-embedding.py | 1 | 4685 | import pandas as pd
import urllib2
from pprint import pprint
from matplotlib import pyplot as plt
from bs4 import BeautifulSoup
from hashlib import md5
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=250)
# May need to add things here to make this run the same way each time
np.random.seed(123)
# --
num_features = 10000 # Words
max_len = 100 # Words
formatter = KerasFormatter(num_features, max_len)
# --
# Load data
orig = pd.read_csv('/Users/BenJohnson/projects/laundering/sec/edward/analysis/crowdsar/crowdsar_user.csv', sep = '|', header = None)
orig.columns = ('hash', 'obj')
orig['id'] = 0
# Get
frequent_posters = orig.hash.value_counts().head(100).index
nfrequent_posters = orig.hash.value_counts().head(100).tail(25).index
sub = orig[orig.hash.isin(frequent_posters)]
sel = np.random.uniform(0, 1, sub.shape[0]) > .9
sub = sub[sel].drop_duplicates()
sel2 = np.random.uniform(0, 1, sub.shape[0]) > .5
df = sub[sel2]
tdf = sub[~sel2]
tdf2 = orig[orig.hash.isin(nfrequent_posters)].drop_duplicates()
sel3 = np.random.uniform(0, 1, tdf2.shape[0]) > .9
tdf2 = tdf2[sel3]
# --
train = make_triplet_train(df, N = 500)
trn, trn_levs = formatter.format(train, ['obj'], 'hash')
awl, awl_levs = formatter.format(train.drop_duplicates(), ['obj'], 'hash')
# tst, tst_levs = formatter.format(tdf, ['obj'], 'hash')
out, out_levs = formatter.format(tdf2, ['obj'], 'hash')
# --
# Define model
recurrent_size = 64
dense_size = 16
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size, return_sequences = True))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_cosine', optimizer = 'adam')
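# Sketch of what the network above encodes: each post is embedded
# token-by-token, summarised by the two stacked LSTMs, projected to a 16-d
# vector and (via the custom 'unit_norm' activation) normalised to unit
# length, so the 'triplet_cosine' loss can pull same-author posts together
# and push different authors apart.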
# --
# Train model
for i in range(60):
ms = modsel(train.shape[0], N = 3)
fitting = model.fit(
trn['x'][0][ms], trn['x'][0][ms],
nb_epoch = 3,
batch_size = 3 * 250,
shuffle = False
)
json_string = model.to_json()
open('author2_architecture.json', 'w').write(json_string)
model.save_weights('author2_weights.h5')
tr_preds = model.predict(awl['x'][0], verbose = True, batch_size = 250)
colors = awl['y'].argmax(1)
plt.scatter(tr_preds[:,0], tr_preds[:,1], c = colors)
plt.show()
# ------------------------------------------------
# Load pretrained model
#
# from keras.models import model_from_json
# model = model_from_json(open('author_architecture.json').read())
# model.load_weights('author_weights.h5')
# <<
shp = awl['y'].shape[1]
amax = awl['y'].argmax(1)
sims = np.zeros( (awl['y'].shape[1], awl['y'].shape[1]) )
tmps = [tr_preds[amax == i] for i in range(shp)]
for i in range(shp):
print i
a = tmps[i]
for j in range(shp):
b = tmps[j]
mn = np.mean(np.dot(a, b.T) > .8)
sims[i][j] = mn
np.mean(np.max(sims, 0) - np.diag(sims))
np.mean(np.max(sims, 0) - sims)
np.mean(sims.argmax(1) == np.arange(sims.shape[0]))
# >>
ts_preds = model.predict(tst['x'][0], verbose = True, batch_size = 250)
tmpsel = np.random.choice(ts_preds.shape[0], 5000)
sim = np.dot(ts_preds[tmpsel], tr_preds.T)
np.mean(tst['y'].argmax(1)[tmpsel] == awl['y'].argmax(1)[sim.argmax(1)])
# --
out_preds = model.predict(out['x'][0], verbose = True, batch_size = 250)
outsims = np.dot(out_preds, out_preds.T)
shp = out['y'].shape[1]
amax = out['y'].argmax(1)
sims = np.zeros( (out['y'].shape[1], out['y'].shape[1]) )
tmps = [out_preds[amax == i] for i in range(shp)]
for i in range(shp):
print i
a = tmps[i]
for j in range(shp):
b = tmps[j]
mn = np.mean(np.dot(a, b.T) > .8)
sims[i][j] = mn
sims.argmax(1) == np.arange(sims.shape[0])
np.fill_diagonal(outsims, 0)
rowmax = outsims.argmax(1)
by_user = map(lambda K: np.mean(amax[rowmax[amax == K]] == K), range(out['y'].shape[1]))
pprint(by_user)
# >>
from sklearn.cluster import KMeans
lens = np.array(tdf2.obj.apply(lambda x: len(str(x))))
km = KMeans(n_clusters = 26)
cl = km.fit_predict(out_preds[lens > 100])
amax = out['y'][lens > 100].argmax(1)
pd.crosstab(cl, amax)
# <<
# --
out_preds = model.predict(out['x'][0], verbose = True, batch_size = 250)
sel = np.random.uniform(0, 1, out_preds.shape[0]) > .5
outsims = np.dot(out_preds[sel], out_preds[~sel].T)
amax1 = out['y'].argmax(1)[sel]
amax2 = out['y'].argmax(1)[~sel]
conf = pd.crosstab(amax1, amax2[outsims.argmax(1)])
np.mean(np.array(conf).argmax(1) == range(conf.shape[0]))
| apache-2.0 |
shikhardb/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
xubenben/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
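    Examples
    --------
    A small sketch (values chosen so each point is the midpoint of its two
    neighbors, hence weights of roughly one half each):
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [1., 1.]])
    >>> Z = np.array([[[-1., 0.], [1., 0.]],
    ...               [[0., 1.], [2., 1.]]])
    >>> barycenter_weights(X, Z)  # doctest: +SKIP
    array([[ 0.5,  0.5],
           [ 0.5,  0.5]])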
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
        n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
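    Examples
    --------
    A minimal sketch, embedding a swiss roll into two dimensions:
    >>> from sklearn.datasets import make_swiss_roll
    >>> X, _ = make_swiss_roll(n_samples=100, random_state=0)
    >>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
    >>> Y.shape
    (100, 2)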
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # + I completes M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
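    Examples
    --------
    A minimal sketch of the estimator API (mirroring the function above):
    >>> from sklearn.manifold import LocallyLinearEmbedding
    >>> from sklearn.datasets import make_swiss_roll
    >>> X, _ = make_swiss_roll(n_samples=100, random_state=0)
    >>> embedding = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
    >>> X_transformed = embedding.fit_transform(X)
    >>> X_transformed.shape
    (100, 2)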
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
| bsd-3-clause |
vbraga/irismatch | src/iris_detection.py | 2 | 1707 | #!/usr/bin/env python2.7
# Imported from https://gitorious.org/hough-circular-transform
# License: GPLv3
# Date: Fri, Mar 7 2014
import matplotlib.pyplot as plt
import matplotlib.patches as plt_patches
import houghcirculartransform as hct
import numpy as np
def detect_iris(filename):
"""
Example function call:
For a trickier example, load 'test2.png'!
    (Can't find the circle? Try the debug mode!)
((Pro tip: try lowering the threshold...))
"""
CH = hct.CircularHough()
raw_image = plt.imread(filename)
raw_image = raw_image[:,:,0] # get the first channel
print "[DEBUG] Image shape is: " + str(raw_image.shape)
min_size = min(raw_image.shape)
# 0.1 to 0.8 from Daugman paper
lower_bound = int(0.1 * min_size) / 2
upper_bound = int(0.8 * min_size) / 2
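    # Worked example of the bounds above (image size is illustrative): for a
    # 300x400 crop min_size is 300, so candidate radii run from 15 px
    # (half of 10% of 300) to 120 px (half of 80% of 300) in steps of 3.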
accumulator, radii = CH(raw_image, radii=np.arange(lower_bound, upper_bound, 3), threshold=0.01, binary=True, method='fft')
print "[DEBUG] Calling imshow"
plt.imshow(raw_image)
plt.title('Raw image (inverted)')
# Add appropriate circular patch to figure (thanks to MZ!):
for i, r in enumerate(radii):
        # [Vitor] i is the radius index and r the radius value;
        # accumulator[i] is the 2-D vote map whose argmax gives the centre
point = np.unravel_index(accumulator[i].argmax(), accumulator[i].shape)
try:
blob_circ = plt_patches.Circle((point[1], point[0]), r, fill=False, ec='red')
plt.gca().add_patch(blob_circ)
except ValueError:
print point, r
continue
# Fix axis distortion:
plt.axis('image')
plt.show()
if __name__ == '__main__':
detect_iris("../working-db/003L_3.png")
| gpl-2.0 |
nathanielvarona/airflow | tests/providers/apache/pinot/hooks/test_pinot.py | 1 | 9827 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import os
import subprocess
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.apache.pinot.hooks.pinot import PinotAdminHook, PinotDbApiHook
class TestPinotAdminHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.extra_dejson = {'cmd_path': './pinot-admin.sh'}
class PinotAdminHookTest(PinotAdminHook):
def get_connection(self, conn_id):
return conn
self.db_hook = PinotAdminHookTest()
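        # Overriding get_connection() above means no Airflow connection has
        # to exist in a metadata database; each test below only asserts on
        # the arguments handed to the mocked run_cli or subprocess.Popen.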
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_add_schema(self, mock_run_cli):
params = ["schema_file", False]
self.db_hook.add_schema(*params)
mock_run_cli.assert_called_once_with(
[
'AddSchema',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-schemaFile',
params[0],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_add_table(self, mock_run_cli):
params = ["config_file", False]
self.db_hook.add_table(*params)
mock_run_cli.assert_called_once_with(
[
'AddTable',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-filePath',
params[0],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_create_segment(self, mock_run_cli):
params = {
"generator_config_file": "a",
"data_dir": "b",
"segment_format": "c",
"out_dir": "d",
"overwrite": True,
"table_name": "e",
"segment_name": "f",
"time_column_name": "g",
"schema_file": "h",
"reader_config_file": "i",
"enable_star_tree_index": False,
"star_tree_index_spec_file": "j",
"hll_size": 9,
"hll_columns": "k",
"hll_suffix": "l",
"num_threads": 8,
"post_creation_verification": True,
"retry": 7,
}
self.db_hook.create_segment(**params)
mock_run_cli.assert_called_once_with(
[
'CreateSegment',
'-generatorConfigFile',
params["generator_config_file"],
'-dataDir',
params["data_dir"],
'-format',
params["segment_format"],
'-outDir',
params["out_dir"],
'-overwrite',
params["overwrite"],
'-tableName',
params["table_name"],
'-segmentName',
params["segment_name"],
'-timeColumnName',
params["time_column_name"],
'-schemaFile',
params["schema_file"],
'-readerConfigFile',
params["reader_config_file"],
'-starTreeIndexSpecFile',
params["star_tree_index_spec_file"],
'-hllSize',
params["hll_size"],
'-hllColumns',
params["hll_columns"],
'-hllSuffix',
params["hll_suffix"],
'-numThreads',
params["num_threads"],
'-postCreationVerification',
params["post_creation_verification"],
'-retry',
params["retry"],
]
)
@mock.patch('airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook.run_cli')
def test_upload_segment(self, mock_run_cli):
params = ["segment_dir", False]
self.db_hook.upload_segment(*params)
mock_run_cli.assert_called_once_with(
[
'UploadSegment',
'-controllerHost',
self.conn.host,
'-controllerPort',
self.conn.port,
'-segmentDir',
params[0],
]
)
@mock.patch('subprocess.Popen')
def test_run_cli_success(self, mock_popen):
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout = io.BytesIO(b'')
mock_popen.return_value.__enter__.return_value = mock_proc
params = ["foo", "bar", "baz"]
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
)
@mock.patch('subprocess.Popen')
def test_run_cli_failure_error_message(self, mock_popen):
msg = b"Exception caught"
mock_proc = mock.MagicMock()
mock_proc.returncode = 0
mock_proc.stdout = io.BytesIO(msg)
mock_popen.return_value.__enter__.return_value = mock_proc
params = ["foo", "bar", "baz"]
with pytest.raises(AirflowException):
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=None
)
@mock.patch('subprocess.Popen')
def test_run_cli_failure_status_code(self, mock_popen):
mock_proc = mock.MagicMock()
mock_proc.returncode = 1
mock_proc.stdout = io.BytesIO(b'')
mock_popen.return_value.__enter__.return_value = mock_proc
self.db_hook.pinot_admin_system_exit = True
params = ["foo", "bar", "baz"]
with pytest.raises(AirflowException):
self.db_hook.run_cli(params)
params.insert(0, self.conn.extra_dejson.get('cmd_path'))
env = os.environ.copy()
env.update({"JAVA_OPTS": "-Dpinot.admin.system.exit=true "})
mock_popen.assert_called_once_with(
params, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, close_fds=True, env=env
)
class TestPinotDbApiHook(unittest.TestCase):
def setUp(self):
super().setUp()
self.conn = conn = mock.MagicMock()
self.conn.host = 'host'
self.conn.port = '1000'
self.conn.conn_type = 'http'
self.conn.extra_dejson = {'endpoint': 'query/sql'}
self.cur = mock.MagicMock(rowcount=0)
self.conn.cursor.return_value = self.cur
self.conn.__enter__.return_value = self.cur
self.conn.__exit__.return_value = None
class TestPinotDBApiHook(PinotDbApiHook):
def get_conn(self):
return conn
def get_connection(self, conn_id):
return conn
self.db_hook = TestPinotDBApiHook
def test_get_uri(self):
"""
Test on getting a pinot connection uri
"""
db_hook = self.db_hook()
assert db_hook.get_uri() == 'http://host:1000/query/sql'
def test_get_conn(self):
"""
Test on getting a pinot connection
"""
conn = self.db_hook().get_conn()
assert conn.host == 'host'
assert conn.port == '1000'
assert conn.conn_type == 'http'
assert conn.extra_dejson.get('endpoint') == 'query/sql'
def test_get_records(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchall.return_value = result_sets
assert result_sets == self.db_hook().get_records(statement)
def test_get_first(self):
statement = 'SQL'
result_sets = [('row1',), ('row2',)]
self.cur.fetchone.return_value = result_sets[0]
assert result_sets[0] == self.db_hook().get_first(statement)
def test_get_pandas_df(self):
statement = 'SQL'
column = 'col'
result_sets = [('row1',), ('row2',)]
self.cur.description = [(column,)]
self.cur.fetchall.return_value = result_sets
df = self.db_hook().get_pandas_df(statement)
assert column == df.columns[0]
for i in range(len(result_sets)): # pylint: disable=consider-using-enumerate
assert result_sets[i][0] == df.values.tolist()[i][0]
class TestPinotDbApiHookIntegration(unittest.TestCase):
@pytest.mark.integration("pinot")
@mock.patch.dict('os.environ', AIRFLOW_CONN_PINOT_BROKER_DEFAULT="pinot://pinot:8000/")
def test_should_return_records(self):
hook = PinotDbApiHook()
sql = "select playerName from baseballStats ORDER BY playerName limit 5"
records = hook.get_records(sql)
assert [["A. Harry"], ["A. Harry"], ["Aaron"], ["Aaron Albert"], ["Aaron Albert"]] == records
| apache-2.0 |
rubikloud/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/externals/joblib/parallel.py | 31 | 35665 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
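    A quick sketch of what the returned callable captures:
    >>> from math import sqrt
    >>> func, args, kwargs = delayed(sqrt)(4)
    >>> func(*args, **kwargs)
    2.0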
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes : int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
        called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of our pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
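            # Illustration (assuming an 8-core machine, i.e. mp.cpu_count() == 8):
            #   n_jobs=-1  ->  max(8 + 1 - 1, 1) == 8  (use all cores)
            #   n_jobs=-2  ->  max(8 + 1 - 2, 1) == 7  (all but one core)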
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should only be called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
        The iterator consumption and dispatching are protected by the same
        lock, so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
                # processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
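                # Worked illustration with assumed timings: if old_batch_size
                # was 1, the measured batch_duration was 0.02s and
                # MIN_IDEAL_BATCH_DURATION were 0.2s, then ideal_batch_size
                # comes out to 10 and the next batches hold
                # max(2 * 10, 1) == 20 tasks each.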
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
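            # Illustration with assumed values: with the default
            # pre_dispatch='2 * n_jobs' and n_jobs=4, the eval() above yields 8,
            # so only the first 8 tasks are materialized before workers start
            # reporting completions.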
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
nickgentoo/scikit-learn-graph | skgraph/datasets/load_graph_datasets.py | 1 | 27349 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 13:02:41 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
"""
from scipy.sparse import csr_matrix
from ioskgraph import load_target
from ..graph import instance_to_graph
from sklearn.datasets.base import Bunch
#TODO import openbabel only if needed
#from obabel import obabel_to_eden
def dispatch(dataset):
if dataset=="CAS":
print "Loading bursi(CAS) dataset"
g_it=load_graphs_bursi()
elif dataset=="GDD":
print "Loading GDD dataset"
g_it=load_graphs_GDD()
elif dataset=="CPDB":
print "Loading CPDB dataset"
g_it=load_graphs_CPDB()
elif dataset=="AIDS":
print "Loading AIDS dataset"
g_it=load_graphs_AIDS()
elif dataset=="NCI1":
print "Loading NCI1 dataset"
g_it=load_graphs_NCI1()
elif dataset=="NCI109":
print "Loading NCI109 dataset"
g_it=load_graphs_NCI109()
elif dataset=="NCI123":
print "Loading NCI123 dataset"
g_it=load_graphs_NCI123()
elif dataset=="NCI_AIDS":
print "Loading NCI_AIDS dataset"
g_it=load_graphs_NCI_AIDS()
elif dataset=="Chemical2":
print "Loading LEUK40OV41LEUK47OV50 dataset"
g_it=load_graphs_LEUK40OV41LEUK47OV50()
elif dataset=="Chemical1":
print "Loading LEUK40LEUK47OV41OV50 dataset"
g_it=load_graphs_LEUK40LEUK47OV41OV50()
elif dataset=="Chemical3":
print "Loading LEUK40LEUK47OV41OV50LEUK40OV41LEUK47OV50 dataset"
g_it=load_graphs_LEUK40LEUK47OV41OV50LEUK40OV41LEUK47OV50()
elif dataset=="Chemical_reduced":
print "Loading LEUK40OV41LEUK47OV50 REDUCED dataset"
g_it=load_graphs_LEUK40OV41LEUK47OV50_reduced()
elif dataset=="MUTAG":
print "Loading MUTAG dataset"
g_it=load_graphs_MUTAG()
elif dataset=="enzymes":
print "Loading enzymes dataset"
g_it=load_graphs_enzymes()
elif dataset=="proteins":
print "Loading proteins dataset"
g_it=load_graphs_proteins()
elif dataset=="synthetic":
print "Loading synthetic dataset"
g_it=load_graphs_synthetic()
elif dataset=="BZR":
print "Loading BZR dataset"
g_it=load_graphs_BZR()
elif dataset=="COX2":
print "Loading COX2 dataset"
g_it=load_graphs_COX2()
elif dataset=="DHFR":
print "Loading DHFR dataset"
g_it=load_graphs_DHFR()
elif dataset=="PROTEINS_full":
print "Loading PROTEINS_full dataset"
g_it=load_graphs_PROTEINS_full()
elif dataset=="LMdata":
print "Loading LMdata dataset"
g_it=load_graphs_LMdata()
else:
print "Unknown dataset name"
return g_it
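# Usage sketch for dispatch (illustrative only; downloading the datasets
# requires network access and, for the SMILES-based loaders, the optional
# openbabel dependency):
#
#   bunch = dispatch("MUTAG")
#   graphs, labels = bunch.graphs, bunch.target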
def convert_to_sparse_matrix(km):
# translate dictionary to Compressed Sparse Row matrix
if len(km) == 0:
raise Exception('ERROR: something went wrong, empty feature_dict. Perhaps wrong data format, i.e. do nodes have the "viewpoint" attribute?')
row, col, data = [], [], []
ne = len(km)
for i in range(ne):
for j in range(ne):
if (km[i][j]!=0):
row.append( i )
col.append( j )
data.append(km[i][j])
print len(row),len(col),len(data)
X = csr_matrix( (data,(row,col)), shape = (ne, ne))
return X
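# Minimal usage sketch with an assumed 2x2 kernel matrix:
#
#   km = [[1.0, 0.0], [0.5, 2.0]]
#   X = convert_to_sparse_matrix(km)  # stores the 3 non-zero entries, prints "3 3 3"
#
# X is then a 2x2 scipy.sparse CSR matrix equivalent to km.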
def load_graphs_GDD():
"""Load the GDD graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/GDD/GDD_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/GDD/graphs.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
gra=[i for i in g_it]
print 'Loaded GDD graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_MUTAG():
"""Load the MUTAG graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/MUTAG/mutag_188_target.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/MUTAG/mutag_188_data.can'
_target=load_target(input_target_url)
g_it=obabel_to_eden(input = input_data_url,file_type ='smi')
gra=[i for i in g_it]
print 'Loaded MUTAG graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_CPDB():
"""Load the CPDB graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/CPDB/mutagen_labels.tab'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/CPDB/mutagen_smile.can'
_target=load_target(input_target_url)
from obabel import obabel_to_eden
g_it=obabel_to_eden(input = input_data_url,file_type ='smi')
gra=[i for i in g_it]
print 'Loaded CPDB graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_AIDS():
"""Load the AIDS graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/AIDS/CAvsCM.y'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/AIDS/CAvsCM.can'
_target=load_target(input_target_url)
from obabel import obabel_to_eden
g_it=obabel_to_eden(input = input_data_url,file_type ='smi')
gra=[i for i in g_it]
print 'Loaded AIDS graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_NCI1():
"""Load the NCI1 graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/NCI1/NCI1_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/NCI1/NCI1_graphs.gspan'
_target=load_target(input_target_url)
label_dict={}
g_it=instance_to_graph(input = input_data_url)
#g_it=instance_to_graph(input = input_data_url,label_dict=label_dict)
print 'Loaded NCI1 graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
#label_dict=label_dict,
labels=True,
veclabels=False)
def load_graphs_NCI109():
"""Load the NCI109 graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/NCI109/NCI109_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/NCI109/NCI109_graphs.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
print 'Loaded NCI109 graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=False)
def load_graphs_bursi():
"""Load the Bursi graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.bioinf.uni-freiburg.de/~costa/bursi.target'
input_data_url='http://www.bioinf.uni-freiburg.de/~costa/bursi.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
print 'Loaded Bursi graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=False)
def load_graphs_enzymes():
"""Load the ENZYMES graph dataset for (multiclass) graph classification from:
Schomburg, I., Chang, A., Ebeling, C., Gremse, M., Heldt, C., Huhn, G., & Schomburg, D. (2004).
BRENDA, the enzyme database: updates and major new developments.
Nucleic Acids Research, 32, D431–D433. doi:10.1093/nar/gkh081
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/ENZYMES.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/ENZYMES.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded ENZYMES graph dataset for (multiclass) graph classification from:'
print 'Schomburg, I., Chang, A., Ebeling, C., Gremse, M., Heldt, C., Huhn, G., & Schomburg, D. (2004).'
print 'BRENDA, the enzyme database: updates and major new developments.'
print 'Nucleic Acids Research, 32, D431–D433. doi:10.1093/nar/gkh081'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_proteins():
"""Load the PROTEINS graph dataset for graph classification from:
Dobson, P. D., & Doig, A. J. (2003)
Distinguishing enzyme structures from non-enzymes without alignments.
Journal of Molecular Biology, 330, 771–783. doi:10.1016/S0022-2836(03)00628-4
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
print 'Loaded PROTEINS graph dataset for graph classification from:'
print 'Dobson, P. D., & Doig, A. J. (2003)'
print 'Distinguishing enzyme structures from non-enzymes without alignments.'
print 'Journal of Molecular Biology, 330, 771–783. doi:10.1016/S0022-2836(03)00628-4'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_synthetic():
"""Load the SYNTHETIC graph dataset for graph classification from:
Feragen, A., Kasenburg, N., Petersen, J., de Bruijne, M., & Borgwardt, K. M. (2013)
Scalable kernels for graphs with continuous attributes.
In Neural Information Processing Systems (NIPS) 2013 (pp. 216–224).
Retrieved from http://papers.nips.cc/paper/5155-scalable-kernels-for-graphs-with-continuous-attributes.pdf
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/SYNTHETICnew.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/SYNTHETICnew.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
g=[i for i in g_it]
for i in g:
for n in i.nodes():
i.node[n]['label']=str(i.degree(n))
print 'Loaded SYNTHETIC graph dataset for graph classification from:'
print 'Feragen, A., Kasenburg, N., Petersen, J., de Bruijne, M., & Borgwardt, K. M. (2013)'
print 'Scalable kernels for graphs with continuous attributes.'
print 'In Neural Information Processing Systems (NIPS) 2013 (pp. 216–224).'
return Bunch(graphs=g,
target=_target,
labels=True,
veclabels=True)
def load_graphs_BZR():
"""Load the BZR graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/BZR_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/BZR.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded BZR graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_COX2():
"""Load the COX2 graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/COX2_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/COX2.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded COX2 graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_DHFR():
"""Load the DHFR graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DHFR_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DHFR.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded DHFR graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_PROTEINS_full():
"""Load the PROTEINS_full graph dataset for graph classification from:
Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph
Kernels from Propagated Information. Under review at MLJ.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS_full_graph_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/PROTEINS_full.gspan'
#input_target_url='datasets/ENZYMES.labels'
#input_data_url='datasets/ENZYMES.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url) #url
#return Bunch(data=flat_data,
# target=target.astype(np.int),
# target_names=np.arange(10),
# images=images,
# DESCR=descr)
print 'Loaded PROTEINS_full graph dataset for graph classification from:'
print 'Neumann, M., Garnett R., Bauckhage Ch., Kersting K.: Propagation Kernels: Efficient Graph'
print 'Kernels from Propagated Information. MLJ 2015.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=True)
def load_graphs_NCI123():
"""Load the NCI123 graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/Leukemia/leukemia_labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/Leukemia/leukemia.smile'
_target=load_target(input_target_url)
g_it=obabel_to_eden(input = input_data_url,file_type ='can')
gra=[i for i in g_it]
print 'Loaded NCI123 graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
target=_target,
labels=True,
veclabels=False)
def load_graphs_NCI_AIDS():
"""Load the NCI antiHIV graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/NCI_AIDS/AIDO99SD_numeric.labels'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/NCI_AIDS/AIDO99SD.gspan'
_target=load_target(input_target_url)
g_it=instance_to_graph(input = input_data_url)
print 'Loaded NCI antiHIV dataset graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40OV41LEUK47OV50():
#chemical2
"""Load the Chemical2 graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/stream.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40LEUK47OV41OV50():
#chemical1
"""Load the Chemical1 graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_NEW/labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_NEW/stream.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40OV41LEUK47OV50_reduced():
"""Load the Chemical graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/labels_reduced_101.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_DRIFT_LEUK40OV41LEUK47OV50/stream_reduced_101.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LEUK40LEUK47OV41OV50LEUK40OV41LEUK47OV50():
#chemical1
"""Load the Chemical1 graph dataset for graph classification from
An Empirical Study on Budget-Aware Online Kernel Algorithms for Streams of Graphs
G Da San Martino, N Navarin, A Sperduti
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
from obabel import obabel_to_eden
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_CHEMICAL_BIG/labels.txt'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets/DATASET_CHEMICAL_BIG/stream.can'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=obabel_to_eden(input = input_data_url,file_type ='can',dict_labels=label_dict,counter=counter)
gra=[i for i in g_it]
print 'Loaded Chemical graph dataset for graph classification.'
print len(gra),'graphs.'
return Bunch(graphs=gra,
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False)
def load_graphs_LMdata():
"""Load the LMdata graph dataset for graph classification..
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'graphs', the graphs in the dataset in Networkx format, 'target', the classification labels for each
sample.
"""
input_target_url='http://www.math.unipd.it/~nnavarin/datasets/LMdata/labels.txt.standardized'
input_data_url='http://www.math.unipd.it/~nnavarin/datasets//LMdata/graphs.gspan.standardized'
_target=load_target(input_target_url)
label_dict={}
counter=[1]
g_it=instance_to_graph(input_data_url,label_dict,counter)
print 'Loaded LMdata graph dataset for graph classification.'
return Bunch(graphs=[i for i in g_it],
label_dict=label_dict,
target=_target,
labels=True,
veclabels=False) | gpl-3.0 |
SamProtas/PALiquor | geocode_fixes.py | 1 | 2733 | import os
import pandas as pd
import numpy as np
import sqlite3
import requests
import time
def fix_location(lid, new_address):
pd.set_option('display.mpl_style', 'default')
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASE1 = os.path.join(PROJECT_ROOT, 'dbs', 'licensees.db')
conn1 = sqlite3.connect(DATABASE1)
c = conn1.cursor()
c.execute('SELECT address, latitude, longitude FROM licensees WHERE lid = ?',[lid])
old_info = c.fetchone()
old_latitude = old_info[1]
old_longitude = old_info[2]
if old_latitude or old_longitude:
return 'No need to fix. Aborting geocode call.'
api_key = 'NOT MY REAL KEY!!!!!'
baseurl = 'https://maps.googleapis.com/maps/api/geocode/json?key='+api_key+'&address='
fullurl = baseurl + new_address
page = requests.get(fullurl)
latitude = page.json()['results'][0]['geometry']['location']['lat']
longitude = page.json()['results'][0]['geometry']['location']['lng']
c.execute('UPDATE licensees SET address = ?, latitude = ?, longitude = ? WHERE lid = ?',[new_address, latitude, longitude, lid])
conn1.commit()
c.close()
return 'Good Fix'
# Manually fixed addresses
fix_location(233,'US Customs House Chestnut Street Philadelphia PA')
time.sleep(.2)
fix_location(43444, '431 South Streeet Philadelphia PA')
time.sleep(.2)
fix_location(45162, '2457 Grant Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(69585, '2400 Strawberry Mansion Drive Philadelphia, PA 19132')
time.sleep(.2)
fix_location(44218, 'Chickie and Petes Roosevelt Boulevard, Philadelphia, PA 19116')
time.sleep(.2)
fix_location(48788, 'Diamond Club at Mitten Hall 1913 North Broad Street Philadelphia, PA 19122')
time.sleep(.2)
fix_location(64349, '51 North 12th Street Philadelphia, PA 19107')
time.sleep(.2)
fix_location(64754, '1420 Locust Street Philadelphia PA 19102')
time.sleep(.2)
fix_location(50302, '39 Snyder Ave Philadelphia PA 19148')
time.sleep(.2)
fix_location(61215, '9910 Frankford Ave Philadelphia PA 19114')
time.sleep(.2)
fix_location(65590, '11000 E Roosevelt BLVD Philadelphia PA')
time.sleep(.2)
fix_location(26715, 'Knights Road Shopping Center 4018 Woodhaven Road Philadelphia, PA 19154')
time.sleep(.2)
fix_location(66741, '9183 Roosevelt BLVD Philadelphia PA 19114')
time.sleep(.2)
fix_location(65221, '129 S 30th St Philadelphia PA 19104')
time.sleep(.2)
fix_location(23775, 'The Bellevue Philadelphia PA 19103')
time.sleep(.2)
fix_location(55796, '5765 Wister St Philadelphia PA 19138')
time.sleep(.2)
fix_location(25469, 'Market East Philadelphia PA 19107')
time.sleep(.2)
fix_location(1140, 'torresdale and decatour, philadelphia pa')
| gpl-2.0 |
liyi193328/seq2seq | seq2seq/contrib/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/experiment.py | 4 | 15233 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment class collecting information needed for a single training run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import time
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
__all__ = ["Experiment"]
class Experiment(object):
"""Experiment is a class containing all information needed to train a model.
After an experiment is created (by passing an Estimator and inputs for
training and evaluation), an Experiment instance knows how to invoke training
and eval loops in a sensible fashion for distributed training.
"""
@deprecated_arg_values(
"2016-10-23",
"local_eval_frequency is deprecated as local_run will be renamed to "
"train_and_evaluate. Use min_eval_frequency and call train_and_evaluate "
"instead. Note, however, that the default for min_eval_frequency is 1, "
"meaning models will be evaluated every time a new checkpoint is "
"available. In contrast, the default for local_eval_frequency is None, "
"resulting in evaluation occurring only after training has completed. "
"min_eval_frequency is ignored when calling the deprecated local_run.",
local_eval_frequency=None)
def __init__(self,
estimator,
train_input_fn,
eval_input_fn,
eval_metrics=None,
train_steps=None,
eval_steps=100,
train_monitors=None,
local_eval_frequency=None,
eval_delay_secs=120,
continuous_eval_throttle_secs=60,
min_eval_frequency=1):
"""Constructor for `Experiment`.
Creates an Experiment instance. None of the functions passed to this
constructor are executed at construction time. They are stored and used
when a method is executed which requires it.
Args:
estimator: Object implementing `Trainable` and `Evaluable`.
train_input_fn: function, returns features and labels for training.
eval_input_fn: function, returns features and labels for evaluation. If
`eval_steps` is `None`, this should be configured only to produce for a
finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
eval_steps: `evaluate` runs until input is exhausted (or another exception
is raised), or for `eval_steps` steps, if specified.
train_monitors: A list of monitors to pass to the `Estimator`'s `fit`
function.
local_eval_frequency: Frequency of running eval in steps,
when running locally. If `None`, runs evaluation only at the end of
training.
eval_delay_secs: Start evaluating after waiting for this many seconds.
continuous_eval_throttle_secs: Do not re-evaluate unless the last
evaluation was started at least this many seconds ago for
continuous_eval().
min_eval_frequency: (applies only to train_and_evaluate). the minimum
number of steps between evaluations. Of course, evaluation does not
occur if no new snapshot is available, hence, this is the minimum.
Raises:
ValueError: if `estimator` does not implement `Evaluable` and `Trainable`.
"""
if not isinstance(estimator, evaluable.Evaluable):
raise ValueError("`estimator` must implement `Evaluable`.")
if not isinstance(estimator, trainable.Trainable):
raise ValueError("`estimator` must implement `Trainable`.")
super(Experiment, self).__init__()
self._estimator = estimator
self._train_input_fn = train_input_fn
self._eval_input_fn = eval_input_fn
self._eval_metrics = eval_metrics
self._train_steps = train_steps
self._eval_steps = eval_steps
self._train_monitors = train_monitors
self._local_eval_frequency = local_eval_frequency
self._eval_delay_secs = eval_delay_secs
self._continuous_eval_throttle_secs = continuous_eval_throttle_secs
self._min_eval_frequency = min_eval_frequency
@property
def estimator(self):
return self._estimator
def train(self, delay_secs=None):
"""Fit the estimator using the training data.
Train the estimator for `self._train_steps` steps, after waiting for
`delay_secs` seconds. If `self._train_steps` is `None`, train forever.
Args:
delay_secs: Start training after this many seconds.
Returns:
The trained estimator.
"""
start = time.time()
# Start the server, if needed. It's important to start the server before
# we (optionally) sleep for the case where no device_filters are set.
# Otherwise, the servers will wait to connect to each other before starting
# to train. We might as well start as soon as we can.
if self._estimator.config.cluster_spec and self._estimator.config.master:
self._start_server()
if delay_secs is None:
task_id = self._estimator.config.task or 0
delay_secs = min(60, task_id * 5)
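      # Illustration: task_id=0 starts immediately, task_id=3 waits 15 seconds,
      # and any task with task_id >= 12 is capped at the 60 second maximum.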
if delay_secs:
elapsed_secs = time.time() - start
remaining = delay_secs - elapsed_secs
logging.info("Waiting %d secs before starting training.", remaining)
time.sleep(delay_secs)
return self._estimator.fit(input_fn=self._train_input_fn,
max_steps=self._train_steps,
monitors=self._train_monitors)
def evaluate(self, delay_secs=None):
"""Evaluate on the evaluation data.
Runs evaluation on the evaluation data and returns the result. Runs for
`self._eval_steps` steps, or if it's `None`, then run until input is
exhausted or another exception is raised. Start the evaluation after
`delay_secs` seconds, or if it's `None`, defaults to using
`self._eval_delay_secs` seconds.
Args:
delay_secs: Start evaluating after this many seconds. If `None`, defaults
to using `self._eval_delays_secs`.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if delay_secs:
logging.info("Waiting %d secs before starting eval.", delay_secs)
time.sleep(delay_secs)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name="one_pass")
@deprecated(
"2016-10-23",
"local_run will be renamed to train_and_evaluate and the new default "
"behavior will be to run evaluation every time there is a new "
"checkpoint.")
def local_run(self):
with _new_attr_context(self, "_min_eval_frequency"):
self._min_eval_frequency = self._local_eval_frequency
return self.train_and_evaluate()
def _continuous_eval(self,
input_fn,
name,
delay_secs,
throttle_delay_secs):
"""Run continuous eval.
Runs infinite eval on the evaluation data set. This function starts
evaluating after `delay_secs` seconds and then runs no more than one
evaluation (with `self._eval_steps` steps each time) per
`throttle_delay_secs`. It never returns.
Args:
input_fn: The input to use for this eval.
name: A string appended to the folder name of evaluation results.
delay_secs: Start evaluating after this many seconds. If None, defaults to
self._eval_delay_secs.
throttle_delay_secs: Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. If None, defaults to
self._continuous_eval_throttle_secs.
"""
if delay_secs is None:
delay_secs = self._eval_delay_secs
if throttle_delay_secs is None:
throttle_delay_secs = self._continuous_eval_throttle_secs
if delay_secs:
logging.info("Waiting %f secs before starting eval.", delay_secs)
time.sleep(delay_secs)
last_fitted_error_time = 0
while True:
start = time.time()
try:
self._estimator.evaluate(input_fn=input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=name)
except NotFittedError:
# Print warning message every 10 mins.
if time.time() - last_fitted_error_time > 600:
logging.warning(
"Estimator is not fitted yet. "
"Will start an evaluation when a checkpoint will be ready.")
last_fitted_error_time = time.time()
duration = time.time() - start
if duration < throttle_delay_secs:
difference = throttle_delay_secs - duration
logging.info("Waiting %f secs before starting next eval run.",
difference)
time.sleep(difference)
def continuous_eval(self, delay_secs=None, throttle_delay_secs=None):
self._continuous_eval(self._eval_input_fn,
name="continuous",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def continuous_eval_on_train_data(self,
delay_secs=None,
throttle_delay_secs=None):
self._continuous_eval(self._train_input_fn,
name="continuous_on_train_data",
delay_secs=delay_secs,
throttle_delay_secs=throttle_delay_secs)
def train_and_evaluate(self):
"""Interleaves training and evaluation.
    The frequency of evaluation is controlled by the constructor arg
`min_eval_frequency`. When this parameter is None or 0, evaluation happens
only after training has completed. Note that evaluation cannot happen
more frequently than checkpoints are taken. If no new snapshots are
available when evaluation is supposed to occur, then evaluation doesn't
happen for another `min_eval_frequency` steps (assuming a checkpoint is
    available at that point). Thus, setting `min_eval_frequency` to 1 means
    that the model will be evaluated every time there is a new checkpoint.
    This is particularly useful for a "Master" task in the cloud, whose
responsibility it is to take checkpoints, evaluate those checkpoints,
and write out summaries. Participating in training as the supervisor
allows such a task to accomplish the first and last items, while
performing evaluation allows for the second.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
# The directory to which evaluation summaries are written are determined
# by adding a suffix to 'eval'; that suffix is the 'name' parameter to
# the various evaluate(...) methods. By setting it to None, we force
# the directory name to simply be 'eval'.
eval_dir_suffix = None
    # We set every_n_steps to min_eval_frequency, but evaluation only occurs
    # when a new snapshot is available. If, by the time we finish evaluation
# there is a new snapshot, then we just evaluate again. Otherwise,
# we keep training until one becomes available.
with _new_attr_context(self, "_train_monitors"):
self._train_monitors = self._train_monitors or []
if self._min_eval_frequency:
self._train_monitors += [monitors.ValidationMonitor(
input_fn=self._eval_input_fn, eval_steps=self._eval_steps,
metrics=self._eval_metrics, every_n_steps=self._min_eval_frequency,
name=eval_dir_suffix,
)]
self.train(delay_secs=0)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=self._eval_steps,
metrics=self._eval_metrics,
name=eval_dir_suffix)
def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
self._start_server().join()
def test(self):
"""Tests training and evaluating the estimator both for a single step.
Returns:
The result of the `evaluate` call to the `Estimator`.
"""
self._estimator.fit(input_fn=self._train_input_fn,
steps=1,
monitors=self._train_monitors)
return self._estimator.evaluate(input_fn=self._eval_input_fn,
steps=1,
metrics=self._eval_metrics,
name="one_pass")
def _start_server(self):
"""Creates, starts, and returns a server_lib.Server."""
config = self._estimator.config
if (not config.cluster_spec or not config.job_name or not config.master or
config.task is None):
raise ValueError("Could not start server; be sure to specify "
"cluster_spec, job_name, master, and task in "
"RunConfig or set the TF_CONFIG environment variable.")
server = server_lib.Server(
config.cluster_spec,
job_name=config.job_name,
task_index=config.task,
config=config.tf_config,
start=False)
server.start()
return server
@contextlib.contextmanager
def _new_attr_context(obj, attr):
"""Creates a new context in which an object's attribute can be changed.
This creates a context in which an object's attribute can be changed.
Once the context is exited, the attribute reverts to its original value.
Example usage:
my_obj.x = 1
with _new_attr_context(my_obj, "x"):
my_obj.x = 2
print(my_obj.x)
print(my_obj.x)
"""
saved = getattr(obj, attr)
try:
yield
finally:
setattr(obj, attr, saved)
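# Illustrative sketch (not part of the original module): _new_attr_context
# restores the attribute on exit regardless of what happened inside.
def _demo_new_attr_context():
  class _Config(object):
    steps = 10
  cfg = _Config()
  with _new_attr_context(cfg, "steps"):
    cfg.steps = 100  # temporary override, as train_and_evaluate does
    assert cfg.steps == 100
  assert cfg.steps == 10  # original value restored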
| apache-2.0 |
bendalab/thunderfish | thunderfish/pulseplots.py | 3 | 38137 | """
Plot and save key steps in pulses.py for visualizing the algorithm.
"""
import glob
import numpy as np
from scipy import stats
from matplotlib import rcParams, gridspec, ticker
import matplotlib.pyplot as plt
try:
from matplotlib.colors import colorConverter as cc
except ImportError:
import matplotlib.colors as cc
try:
from matplotlib.colors import to_hex
except ImportError:
from matplotlib.colors import rgb2hex as to_hex
from matplotlib.patches import ConnectionPatch, Rectangle
from matplotlib.lines import Line2D
import warnings
def warn(*args, **kwargs):
"""
Ignore all warnings.
"""
pass
warnings.warn=warn
# plotting parameters and colors:
rcParams['font.family'] = 'monospace'
cmap = plt.get_cmap("Dark2")
c_g = cmap(0)
c_o = cmap(1)
c_grey = cmap(7)
cmap_pts = [cmap(2), cmap(3)]
def darker(color, saturation):
""" Make a color darker.
From bendalab/plottools package.
Parameters
----------
color: dict or matplotlib color spec
A matplotlib color (hex string, name color string, rgb tuple)
or a dictionary with an 'color' or 'facecolor' key.
saturation: float
The smaller the saturation, the darker the returned color.
A saturation of 0 returns black.
A saturation of 1 leaves the color untouched.
A saturation of 2 returns white.
Returns
-------
color: string or dictionary
The darker color as a hexadecimal RGB string (e.g. '#rrggbb').
If `color` is a dictionary, a copy of the dictionary is returned
with the value of 'color' or 'facecolor' set to the darker color.
"""
try:
c = color['color']
cd = dict(**color)
cd['color'] = darker(c, saturation)
return cd
except (KeyError, TypeError):
try:
c = color['facecolor']
cd = dict(**color)
cd['facecolor'] = darker(c, saturation)
return cd
except (KeyError, TypeError):
if saturation > 2:
saturation = 2
if saturation > 1:
return lighter(color, 2.0-saturation)
if saturation < 0:
saturation = 0
r, g, b = cc.to_rgb(color)
rd = r*saturation
gd = g*saturation
bd = b*saturation
return to_hex((rd, gd, bd)).upper()
def lighter(color, lightness):
"""Make a color lighter
From bendalab/plottools package.
Parameters
----------
color: dict or matplotlib color spec
A matplotlib color (hex string, name color string, rgb tuple)
or a dictionary with an 'color' or 'facecolor' key.
lightness: float
The smaller the lightness, the lighter the returned color.
A lightness of 0 returns white.
A lightness of 1 leaves the color untouched.
A lightness of 2 returns black.
Returns
-------
color: string or dict
The lighter color as a hexadecimal RGB string (e.g. '#rrggbb').
If `color` is a dictionary, a copy of the dictionary is returned
with the value of 'color' or 'facecolor' set to the lighter color.
"""
try:
c = color['color']
cd = dict(**color)
cd['color'] = lighter(c, lightness)
return cd
except (KeyError, TypeError):
try:
c = color['facecolor']
cd = dict(**color)
cd['facecolor'] = lighter(c, lightness)
return cd
except (KeyError, TypeError):
if lightness > 2:
lightness = 2
if lightness > 1:
return darker(color, 2.0-lightness)
if lightness < 0:
lightness = 0
r, g, b = cc.to_rgb(color)
rl = r + (1.0-lightness)*(1.0 - r)
gl = g + (1.0-lightness)*(1.0 - g)
bl = b + (1.0-lightness)*(1.0 - b)
return to_hex((rl, gl, bl)).upper()
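# Illustrative sketch (not part of the original module): build a small shaded
# palette around one of the module colors; the base color and the number of
# shades are arbitrary choices.
def _demo_shades(base=c_g, n=5):
    """Return `n` hex colors going from a lighter to a darker variant of `base`."""
    return [lighter(base, 0.5 + 1.0*i/max(n - 1, 1)) for i in range(n)]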
def xscalebar(ax, x, y, width, wunit=None, wformat=None, ha='left', va='bottom',
lw=None, color=None, capsize=None, clw=None, **kwargs):
"""Horizontal scale bar with label.
From bendalab/plottools package.
Parameters
----------
ax: matplotlib axes
Axes where to draw the scale bar.
x: float
x-coordinate where to draw the scale bar in relative units of the axes.
y: float
y-coordinate where to draw the scale bar in relative units of the axes.
width: float
Length of the scale bar in units of the data's x-values.
wunit: string or None
Optional unit of the data's x-values.
wformat: string or None
Optional format string for formatting the label of the scale bar
or simply a string used for labeling the scale bar.
ha: 'left', 'right', or 'center'
Scale bar aligned left, right, or centered to (x, y)
va: 'top' or 'bottom'
Label of the scale bar either above or below the scale bar.
lw: int, float, None
Line width of the scale bar.
color: matplotlib color
Color of the scalebar.
capsize: float or None
If larger than zero draw cap lines at the ends of the bar.
The length of the lines is given in points (same unit as linewidth).
clw: int, float, None
Line width of the cap lines.
kwargs: key-word arguments
Passed on to `ax.text()` used to print the scale bar label.
"""
ax.autoscale(False)
# ax dimensions:
pixelx = np.abs(np.diff(ax.get_window_extent().get_points()[:,0]))[0]
pixely = np.abs(np.diff(ax.get_window_extent().get_points()[:,1]))[0]
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
unitx = xmax - xmin
unity = ymax - ymin
dxu = np.abs(unitx)/pixelx
dyu = np.abs(unity)/pixely
# transform x, y from relative units to axis units:
x = xmin + x*unitx
y = ymin + y*unity
# bar length:
if wformat is None:
wformat = '%.0f'
if width < 1.0:
wformat = '%.1f'
try:
ls = wformat % width
width = float(ls)
except TypeError:
ls = wformat
# bar:
if ha == 'left':
x0 = x
x1 = x+width
elif ha == 'right':
x0 = x-width
x1 = x
else:
x0 = x-0.5*width
x1 = x+0.5*width
# line width:
if lw is None:
lw = 2
# color:
if color is None:
color = 'k'
# scalebar:
lh = ax.plot([x0, x1], [y, y], '-', color=color, lw=lw,
solid_capstyle='butt', clip_on=False)
# get y position of line in figure pixel coordinates:
ly = np.array(lh[0].get_window_extent(ax.get_figure().canvas.get_renderer()))[0,1]
# caps:
if capsize is None:
capsize = 0
if clw is None:
clw = 0.5
if capsize > 0.0:
dy = capsize*dyu
ax.plot([x0, x0], [y-dy, y+dy], '-', color=color, lw=clw,
solid_capstyle='butt', clip_on=False)
ax.plot([x1, x1], [y-dy, y+dy], '-', color=color, lw=clw,
solid_capstyle='butt', clip_on=False)
# label:
if wunit:
ls += u'\u2009%s' % wunit
if va == 'top':
th = ax.text(0.5*(x0+x1), y, ls, clip_on=False,
ha='center', va='bottom', **kwargs)
# get y coordinate of text bottom in figure pixel coordinates:
ty = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[0,1]
dty = ly+0.5*lw + 2.0 - ty
else:
th = ax.text(0.5*(x0+x1), y, ls, clip_on=False,
ha='center', va='top', **kwargs)
# get y coordinate of text bottom in figure pixel coordinates:
ty = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[1,1]
dty = ly-0.5*lw - 2.0 - ty
th.set_position((0.5*(x0+x1), y+dyu*dty))
return x0, x1, y
def yscalebar(ax, x, y, height, hunit=None, hformat=None, ha='left', va='bottom',
lw=None, color=None, capsize=None, clw=None, **kwargs):
"""Vertical scale bar with label.
From bendalab/plottools package.
Parameters
----------
ax: matplotlib axes
Axes where to draw the scale bar.
x: float
x-coordinate where to draw the scale bar in relative units of the axes.
y: float
y-coordinate where to draw the scale bar in relative units of the axes.
height: float
Length of the scale bar in units of the data's y-values.
hunit: string
Unit of the data's y-values.
hformat: string or None
Optional format string for formatting the label of the scale bar
or simply a string used for labeling the scale bar.
ha: 'left' or 'right'
Label of the scale bar either to the left or to the right
of the scale bar.
va: 'top', 'bottom', or 'center'
Scale bar aligned above, below, or centered on (x, y).
lw: int, float, None
Line width of the scale bar.
color: matplotlib color
Color of the scalebar.
capsize: float or None
If larger than zero draw cap lines at the ends of the bar.
The length of the lines is given in points (same unit as linewidth).
clw: int, float
Line width of the cap lines.
kwargs: key-word arguments
Passed on to `ax.text()` used to print the scale bar label.
"""
ax.autoscale(False)
# ax dimensions:
pixelx = np.abs(np.diff(ax.get_window_extent().get_points()[:,0]))[0]
pixely = np.abs(np.diff(ax.get_window_extent().get_points()[:,1]))[0]
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
unitx = xmax - xmin
unity = ymax - ymin
dxu = np.abs(unitx)/pixelx
dyu = np.abs(unity)/pixely
# transform x, y from relative units to axis units:
x = xmin + x*unitx
y = ymin + y*unity
# bar length:
if hformat is None:
hformat = '%.0f'
if height < 1.0:
hformat = '%.1f'
try:
ls = hformat % height
height = float(ls)
except TypeError:
ls = hformat
# bar:
if va == 'bottom':
y0 = y
y1 = y+height
elif va == 'top':
y0 = y-height
y1 = y
else:
y0 = y-0.5*height
y1 = y+0.5*height
# line width:
if lw is None:
lw = 2
# color:
if color is None:
color = 'k'
# scalebar:
lh = ax.plot([x, x], [y0, y1], '-', color=color, lw=lw,
solid_capstyle='butt', clip_on=False)
# get x position of line in figure pixel coordinates:
lx = np.array(lh[0].get_window_extent(ax.get_figure().canvas.get_renderer()))[0,0]
# caps:
if capsize is None:
capsize = 0
if clw is None:
clw = 0.5
if capsize > 0.0:
dx = capsize*dxu
ax.plot([x-dx, x+dx], [y0, y0], '-', color=color, lw=clw, solid_capstyle='butt',
clip_on=False)
ax.plot([x-dx, x+dx], [y1, y1], '-', color=color, lw=clw, solid_capstyle='butt',
clip_on=False)
# label:
if hunit:
ls += u'\u2009%s' % hunit
if ha == 'right':
th = ax.text(x, 0.5*(y0+y1), ls, clip_on=False, rotation=90.0,
ha='left', va='center', **kwargs)
# get x coordinate of text bottom in figure pixel coordinates:
tx = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[0,0]
dtx = lx+0.5*lw + 2.0 - tx
else:
th = ax.text(x, 0.5*(y0+y1), ls, clip_on=False, rotation=90.0,
ha='right', va='center', **kwargs)
# get x coordinate of text bottom in figure pixel coordinates:
tx = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[1,0]
dtx = lx-0.5*lw - 1.0 - tx
th.set_position((x+dxu*dtx, 0.5*(y0+y1)))
return x, y0, y1
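# Illustrative sketch (not part of the original module): attach a horizontal
# time scale bar and a vertical amplitude scale bar to an axis without spines.
# The data, units and bar lengths are made up.
def _demo_scalebars():
    fig, ax = plt.subplots()
    t = np.linspace(0, 100, 1000)
    ax.plot(t, np.sin(2.0*np.pi*0.05*t), c=c_g)
    ax.axis('off')
    xscalebar(ax, 0.9, 0.05, 20, wunit='ms', ha='right', capsize=3)
    yscalebar(ax, 0.05, 0.9, 1, hunit='mV', va='top', capsize=3)
    return fig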
def arrowed_spines(ax, ms=10):
""" Spine with arrow on the y-axis of a plot.
Parameters
----------
ax : matplotlib figure axis
Axis on which the arrow should be plot.
"""
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.scatter([xmin], [ymax], s=ms, marker='^', clip_on=False, color='k')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
def loghist(ax, x, bmin, bmax, n, c, orientation='vertical', label=''):
""" Plot histogram with logarithmic scale.
Parameters
----------
ax : matplotlib axis
Axis to plot the histogram on.
x : numpy array
Input data for histogram.
bmin : float
Minimum value for the histogram bins.
bmax : float
Maximum value for the histogram bins.
n : int
Number of bins.
c : matplotlib color
Color of histogram.
orientation : string (optional)
Histogram orientation.
Defaults to 'vertical'.
label : string (optional)
Label for x.
Defaults to '' (no label).
Returns
-------
n : array
The values of the histogram bins.
bins : array
The edges of the bins.
patches : BarContainer
Container of individual artists used to create the histogram.
"""
return ax.hist(x, bins=np.exp(np.linspace(np.log(bmin), np.log(bmax), n)),
color=c, orientation=orientation, label=label)
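# Illustrative sketch (not part of the original module): log-spaced histogram
# of a synthetic, log-normally distributed sample.
def _demo_loghist():
    fig, ax = plt.subplots()
    x = np.random.lognormal(mean=0.0, sigma=1.0, size=1000)
    n, bins, patches = loghist(ax, x, np.min(x), np.max(x), 30, c_o, label='x')
    ax.set_xscale('log')
    return n, bins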
def plot_all(data, eod_p_times, eod_tr_times, fs, mean_eods):
"""Quick way to view the output of extract_pulsefish in a single plot.
Parameters
----------
data: array
Recording data.
eod_p_times: list of 1D arrays
EOD peak times (in seconds) for each detected fish.
eod_tr_times: list of 1D arrays
EOD trough times (in seconds) for each detected fish.
fs: float
Samplerate.
mean_eods: list of numpy arrays
Mean EODs of each pulsefish found in the recording.
"""
fig = plt.figure(figsize=(10, 5))
if len(eod_p_times) > 0:
gs = gridspec.GridSpec(2, len(eod_p_times))
ax = fig.add_subplot(gs[0,:])
ax.plot(np.arange(len(data))/fs, data, c='k', alpha=0.3)
for i, (pt, tt) in enumerate(zip(eod_p_times, eod_tr_times)):
ax.plot(pt, data[(pt*fs).astype('int')], 'o', label=i+1, ms=10, c=cmap(i))
ax.plot(tt, data[(tt*fs).astype('int')], 'o', label=i+1, ms=10, c=cmap(i))
ax.set_xlabel('time [s]')
ax.set_ylabel('amplitude [V]')
for i, m in enumerate(mean_eods):
ax = fig.add_subplot(gs[1,i])
ax.plot(1000*m[0], 1000*m[1], c='k')
ax.fill_between(1000*m[0], 1000*(m[1]-m[2]), 1000*(m[1]+m[2]), color=cmap(i))
ax.set_xlabel('time [ms]')
ax.set_ylabel('amplitude [mV]')
else:
plt.plot(np.arange(len(data))/fs, data, c='k', alpha=0.3)
plt.tight_layout()
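# Illustrative sketch (not part of the original module): calling plot_all()
# on a synthetic recording with one invented fish; samplerate, EOD times and
# the mean EOD below are all made up.
def _demo_plot_all(fs=20000.0):
    data = 0.05*np.random.randn(int(fs))              # 1 s of noise
    p_times = [np.array([0.2, 0.5, 0.8])]             # peak times in seconds
    tr_times = [np.array([0.2005, 0.5005, 0.8005])]   # trough times in seconds
    t = np.linspace(-0.001, 0.001, 100)
    mean_eods = [np.vstack((t, np.sin(2.0*np.pi*1000.0*t), np.full(len(t), 0.05)))]
    plot_all(data, p_times, tr_times, fs, mean_eods)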
def plot_clustering(samplerate, eod_widths, eod_hights, eod_shapes, disc_masks, merge_masks):
"""Plot all clustering steps.
Plot clustering steps on width, height and shape. Then plot the remaining EODs after
the EOD assessment step and the EODs after the merge step.
Parameters
----------
samplerate : float
Samplerate of EOD snippets.
eod_widths : list of three 1D numpy arrays
The first list entry gives the unique labels of all width clusters as a list of ints.
The second list entry gives the width values for each EOD in samples as a
1D numpy array of ints.
The third list entry gives the width labels for each EOD as a 1D numpy array of ints.
eod_hights : nested lists (2 layers) of three 1D numpy arrays
The first list entry gives the unique labels of all height clusters as a list of ints
for each width cluster.
The second list entry gives the height values for each EOD as a 1D numpy array
of floats for each width cluster.
The third list entry gives the height labels for each EOD as a 1D numpy array
of ints for each width cluster.
eod_shapes : nested lists (3 layers) of three 1D numpy arrays
The first list entry gives the raw EOD snippets as a 2D numpy array for each
height cluster in a width cluster.
The second list entry gives the snippet PCA values for each EOD as a 2D numpy array
of floats for each height cluster in a width cluster.
The third list entry gives the shape labels for each EOD as a 1D numpy array of ints
for each height cluster in a width cluster.
disc_masks : Nested lists (two layers) of 1D numpy arrays
The masks of EODs that are discarded by the discarding step of the algorithm.
The masks are 1D boolean arrays where
instances that are set to True are discarded by the algorithm. Discarding masks
are saved in nested lists that represent the width and height clusters.
merge_masks : Nested lists (two layers) of 2D numpy arrays
The masks of EODs that are discarded by the merging step of the algorithm.
The masks are 2D boolean arrays where
for each sample point `i` either `merge_mask[i,0]` or `merge_mask[i,1]` is set to True.
Here, merge_mask[:,0] represents the
peak-centered clusters and `merge_mask[:,1]` represents the trough-centered clusters.
Merge masks are saved in nested lists
that represent the width and height clusters.
"""
# create the figure and an inverted figure transform (for cross-axes lines).
fig = plt.figure(figsize=(12, 7))
transFigure = fig.transFigure.inverted()
# set up the figure layout
outer = gridspec.GridSpec(1, 5, width_ratios=[1, 1, 2, 1, 2], left=0.05, right=0.95)
# set titles for each clustering step
titles = ['1. Widths', '2. Heights', '3. Shape', '4. Pulse EODs', '5. Merge']
for i, title in enumerate(titles):
title_ax = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = outer[i])
ax = fig.add_subplot(title_ax[0])
ax.text(0, 110, title, ha='center', va='bottom', clip_on=False)
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
ax.axis('off')
# compute sizes for each axis
w_size = 1
h_size = len(eod_hights[1])
shape_size = np.sum([len(sl) for sl in eod_shapes[0]])
# count the required number of axes for the last two plot columns.
disc_size = 0
merge_size= 0
for shapelabel, dmasks, mmasks in zip(eod_shapes[2], disc_masks, merge_masks):
for sl, dm, mm in zip(shapelabel, dmasks, mmasks):
uld1 = np.unique((sl[0]+1)*np.invert(dm[0]))
uld2 = np.unique((sl[1]+1)*np.invert(dm[1]))
disc_size = disc_size+len(uld1[uld1>0])+len(uld2[uld2>0])
uld1 = np.unique((sl[0]+1)*mm[0])
uld2 = np.unique((sl[1]+1)*mm[1])
merge_size = merge_size+len(uld1[uld1>0])+len(uld2[uld2>0])
# set counters to keep track of the plot axes
disc_block = 0
merge_block = 0
shape_count = 0
# create all axes
width_hist_ax = gridspec.GridSpecFromSubplotSpec(w_size, 1, subplot_spec = outer[0])
hight_hist_ax = gridspec.GridSpecFromSubplotSpec(h_size, 1, subplot_spec = outer[1])
shape_ax = gridspec.GridSpecFromSubplotSpec(shape_size, 1, subplot_spec = outer[2])
shape_windows = [gridspec.GridSpecFromSubplotSpec(2, 2, hspace=0.0, wspace=0.0,
subplot_spec=shape_ax[i])
for i in range(shape_size)]
EOD_delete_ax = gridspec.GridSpecFromSubplotSpec(disc_size, 1, subplot_spec=outer[3])
EOD_merge_ax = gridspec.GridSpecFromSubplotSpec(merge_size, 1, subplot_spec=outer[4])
# plot width labels histogram
ax1 = fig.add_subplot(width_hist_ax[0])
# set axes features.
ax1.set_xscale('log')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.axes.xaxis.set_visible(False)
ax1.set_yticklabels([])
# indices for plot colors (dark to light)
colidxsw = -np.linspace(-1.25, -0.5, h_size)
for i, (wl, colw, uhl, eod_h, eod_h_labs, w_snip, w_feat, w_lab, w_dm,
        w_mm) in enumerate(zip(eod_widths[0], colidxsw, eod_hights[0],
                               eod_hights[1], eod_hights[2], eod_shapes[0],
                               eod_shapes[1], eod_shapes[2], disc_masks,
                               merge_masks)):
# plot width hist
hw, _, _ = ax1.hist(eod_widths[1][eod_widths[2]==wl],
bins=np.linspace(np.min(eod_widths[1]), np.max(eod_widths[1]), 100),
color=lighter(c_o, colw), orientation='horizontal')
# set arrow when the last hist is plot so the size of the axes are known.
if i == h_size-1:
arrowed_spines(ax1, ms=20)
# determine the total size of the height histograms now.
my, b = np.histogram(eod_h, bins=np.exp(np.linspace(np.min(np.log(eod_h)),
np.max(np.log(eod_h)), 100)))
maxy = np.max(my)
# set axes features for the height hist.
ax2 = fig.add_subplot(hight_hist_ax[h_size-i-1])
ax2.set_xscale('log')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.set_xlim(0.9, maxy)
ax2.axes.xaxis.set_visible(False)
ax2.set_yscale('log')
ax2.yaxis.set_major_formatter(ticker.NullFormatter())
ax2.yaxis.set_minor_formatter(ticker.NullFormatter())
# define colors for plots
colidxsh = -np.linspace(-1.25, -0.5, len(uhl))
for n, (hl, hcol, snippets, features, labels, dmasks, mmasks) in enumerate(zip(uhl, colidxsh, w_snip, w_feat, w_lab, w_dm, w_mm)):
hh, _, _ = loghist(ax2, eod_h[eod_h_labs==hl], np.min(eod_h), np.max(eod_h), 100,
lighter(c_g, hcol), orientation='horizontal')
# set arrow spines only on last plot
if n == len(uhl)-1:
arrowed_spines(ax2, ms=10)
# plot line from the width histogram to the height histogram.
if n == 0:
coord1 = transFigure.transform(ax1.transData.transform([np.median(hw[hw!=0]),
np.median(eod_widths[1][eod_widths[2]==wl])]))
coord2 = transFigure.transform(ax2.transData.transform([0.9, np.mean(eod_h)]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey', linewidth=0.5)
fig.lines.append(line)
# compute sizes of the eod_discarding and merge steps
s1 = np.unique((labels[0]+1)*(~dmasks[0]))
s2 = np.unique((labels[1]+1)*(~dmasks[1]))
disc_block = disc_block + len(s1[s1>0]) + len(s2[s2>0])
s1 = np.unique((labels[0]+1)*(mmasks[0]))
s2 = np.unique((labels[1]+1)*(mmasks[1]))
merge_block = merge_block + len(s1[s1>0]) + len(s2[s2>0])
axs = []
disc_count = 0
merge_count = 0
# now plot the clusters for peak and trough centerings
for pt, cmap_pt in zip([0, 1], cmap_pts):
ax3 = fig.add_subplot(shape_windows[shape_size-1-shape_count][pt,0])
ax4 = fig.add_subplot(shape_windows[shape_size-1-shape_count][pt,1])
# remove axes
ax3.axes.xaxis.set_visible(False)
ax4.axes.yaxis.set_visible(False)
ax3.axes.yaxis.set_visible(False)
ax4.axes.xaxis.set_visible(False)
# set color indices
colidxss = -np.linspace(-1.25, -0.5, len(np.unique(labels[pt][labels[pt]>=0])))
j=0
for c in np.unique(labels[pt]):
if c<0:
# plot noise features + snippets
ax3.plot(features[pt][labels[pt]==c,0], features[pt][labels[pt]==c,1],
'.', color='lightgrey', label='-1', rasterized=True)
ax4.plot(snippets[pt][labels[pt]==c].T, linewidth=0.1,
color='lightgrey', label='-1', rasterized=True)
else:
# plot cluster features and snippets
ax3.plot(features[pt][labels[pt]==c,0], features[pt][labels[pt]==c,1],
'.', color=lighter(cmap_pt, colidxss[j]), label=c,
rasterized=True)
ax4.plot(snippets[pt][labels[pt]==c].T, linewidth=0.1,
color=lighter(cmap_pt, colidxss[j]), label=c, rasterized=True)
# check if the current cluster is an EOD, if yes, plot it.
if np.sum(dmasks[pt][labels[pt]==c]) == 0:
ax = fig.add_subplot(EOD_delete_ax[disc_size-disc_block+disc_count])
ax.axis('off')
# plot mean EOD snippet
ax.plot(np.mean(snippets[pt][labels[pt]==c], axis=0),
color=lighter(cmap_pt, colidxss[j]))
disc_count = disc_count + 1
# match colors and draw line..
coord1 = transFigure.transform(ax4.transData.transform([ax4.get_xlim()[1],
ax4.get_ylim()[0] + 0.5*(ax4.get_ylim()[1]-ax4.get_ylim()[0])]))
coord2 = transFigure.transform(ax.transData.transform([ax.get_xlim()[0],ax.get_ylim()[0] + 0.5*(ax.get_ylim()[1]-ax.get_ylim()[0])]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey',
linewidth=0.5)
fig.lines.append(line)
axs.append(ax)
# check if the current EOD survives the merge step
# if so, plot it.
if np.sum(mmasks[pt, labels[pt]==c])>0:
ax = fig.add_subplot(EOD_merge_ax[merge_size-merge_block+merge_count])
ax.axis('off')
ax.plot(np.mean(snippets[pt][labels[pt]==c], axis=0),
color=lighter(cmap_pt, colidxss[j]))
merge_count = merge_count + 1
j=j+1
if pt==0:
# draw line from height cluster to EOD shape clusters.
coord1 = transFigure.transform(ax2.transData.transform([np.median(hh[hh!=0]),
np.median(eod_h[eod_h_labs==hl])]))
coord2 = transFigure.transform(ax3.transData.transform([ax3.get_xlim()[0],
ax3.get_ylim()[0]]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey', linewidth=0.5)
fig.lines.append(line)
shape_count = shape_count + 1
if len(axs)>0:
# plot lines that indicate the merged clusters.
coord1 = transFigure.transform(axs[0].transData.transform([axs[0].get_xlim()[1]+0.1*(axs[0].get_xlim()[1]-axs[0].get_xlim()[0]),
axs[0].get_ylim()[1]-0.25*(axs[0].get_ylim()[1]-axs[0].get_ylim()[0])]))
coord2 = transFigure.transform(axs[-1].transData.transform([axs[-1].get_xlim()[1]+0.1*(axs[-1].get_xlim()[1]-axs[-1].get_xlim()[0]),
axs[-1].get_ylim()[0]+0.25*(axs[-1].get_ylim()[1]-axs[-1].get_ylim()[0])]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey', linewidth=1)
fig.lines.append(line)
def plot_bgm(x, means, variances, weights, use_log, labels, labels_am, xlab):
"""Plot a BGM clustering step either on EOD width or height.
Parameters
----------
x : 1D numpy array of floats
BGM input values.
means : list of floats
BGM Gaussian means
variances : list of floats
BGM Gaussian variances.
weights : list of floats
BGM Gaussian weights.
use_log : boolean
True if the z-scored logarithm of the data was used as BGM input.
labels : 1D numpy array of ints
Labels defined by BGM model (before merging based on merge factor).
labels_am : 1D numpy array of ints
Labels defined by BGM model (after merging based on merge factor).
xlab : string
Label for plot (defines the units of the BGM data).
"""
if 'width' in xlab:
ccol = c_o
elif 'height' in xlab:
ccol = c_g
else:
ccol = 'b'
# get the transform that was used as BGM input
if use_log:
x_transform = stats.zscore(np.log(x))
xplot = np.exp(np.linspace(np.log(np.min(x)), np.log(np.max(x)), 1000))
else:
x_transform = stats.zscore(x)
xplot = np.linspace(np.min(x), np.max(x), 1000)
# compute the x values and gaussians
x2 = np.linspace(np.min(x_transform), np.max(x_transform), 1000)
gaussians = []
gmax = 0
for i, (w, m, std) in enumerate(zip(weights, means, variances)):
gaus = np.sqrt(w*stats.norm.pdf(x2, m, np.sqrt(std)))
gaussians.append(gaus)
gmax = max(np.max(gaus), gmax)
# compute classes defined by gaussian intersections
classes = np.argmax(np.vstack(gaussians), axis=0)
# find the minimum of any gaussian that is within its class
gmin = 100
for i, c in enumerate(np.unique(classes)):
gmin=min(gmin, np.min(gaussians[c][classes==c]))
# set up the figure
fig, ax1 = plt.subplots(figsize=(8, 4.8))
fig_ysize = 4
ax2 = ax1.twinx()
ax1.spines['top'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.set_xlabel('x [a.u.]')
ax1.set_ylabel('#')
ax2.set_ylabel('Likelihood')
ax2.set_yscale('log')
ax1.set_yscale('log')
if use_log:
ax1.set_xscale('log')
ax1.set_xlabel(xlab)
# define colors for plotting gaussians
colidxs = -np.linspace(-1.25, -0.5, len(np.unique(classes)))
# plot the gaussians
for i, c in enumerate(np.unique(classes)):
ax2.plot(xplot, gaussians[c], c=lighter(c_grey, colidxs[i]), linewidth=2,
label=r'$N(\mu_%i, \sigma_%i)$'%(c, c))
# plot intersection lines
ax2.vlines(xplot[1:][np.diff(classes)!=0], 0, gmax/gmin, color='k', linewidth=2,
linestyle='--')
ax2.set_ylim(gmin, np.max(np.vstack(gaussians))*1.1)
# plot data distributions and classes
colidxs = -np.linspace(-1.25, -0.5, len(np.unique(labels)))
for i, l in enumerate(np.unique(labels)):
if use_log:
h, binn, _ = loghist(ax1, x[labels==l], np.min(x), np.max(x), 100,
lighter(ccol, colidxs[i]), label=r'$x_%i$'%l)
else:
h, binn, _ = ax1.hist(x[labels==l], bins=np.linspace(np.min(x), np.max(x), 100),
color=lighter(ccol, colidxs[i]), label=r'$x_%i$'%l)
# annotate merged clusters
for l in np.unique(labels_am):
maps = np.unique(labels[labels_am==l])
if len(maps) > 1:
x1 = x[labels==maps[0]]
x2 = x[labels==maps[1]]
print(np.median(x1))
print(np.median(x2))
print(gmax)
ax2.plot([np.median(x1), np.median(x2)], [1.2*gmax, 1.2*gmax], c='k', clip_on=False)
ax2.plot([np.median(x1), np.median(x1)], [1.1*gmax, 1.2*gmax], c='k', clip_on=False)
ax2.plot([np.median(x2), np.median(x2)], [1.1*gmax, 1.2*gmax], c='k', clip_on=False)
ax2.annotate(r'$\frac{|{\tilde{x}_%i-\tilde{x}_%i}|}{max(\tilde{x}_%i, \tilde{x}_%i)} < \epsilon$' % (maps[0], maps[1], maps[0], maps[1]), [np.median(x1)*1.1, gmax*1.2], xytext=(10, 10), textcoords='offset points', fontsize=12, annotation_clip=False, ha='center')
# add legends and plot.
ax2.legend(loc='lower left', frameon=False, bbox_to_anchor=(-0.05, 1.3),
ncol=len(np.unique(classes)))
ax1.legend(loc='upper left', frameon=False, bbox_to_anchor=(-0.05, 1.3),
ncol=len(np.unique(labels)))
plt.tight_layout()
def plot_feature_extraction(raw_snippets, normalized_snippets, features, labels, dt, pt):
"""Plot clustering step on EOD shape.
Parameters
----------
raw_snippets : 2D numpy array
Raw EOD snippets.
normalized_snippets : 2D numpy array
Normalized EOD snippets.
features : 2D numpy array
PCA values for each normalized EOD snippet.
labels : 1D numpy array of ints
Cluster labels.
dt : float
Sample interval of snippets.
pt : int
Set to 0 for peak-centered EODs and set to 1 for trough-centered EODs.
"""
ccol = cmap_pts[pt]
# set up the figure layout
fig = plt.figure(figsize=(((2+0.2)*4.8), 4.8))
outer = gridspec.GridSpec(1, 2, wspace=0.2, hspace=0)
x = np.arange(-dt*1000*raw_snippets.shape[1]/2, dt*1000*raw_snippets.shape[1]/2, dt*1000)
snip_ax = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec = outer[0], hspace=0.35)
pc_ax = gridspec.GridSpecFromSubplotSpec(features.shape[1]-1, features.shape[1]-1,
subplot_spec = outer[1], hspace=0, wspace=0)
# 3 plots: raw snippets, normalized, pcs.
ax_raw_snip = fig.add_subplot(snip_ax[0])
ax_normalized_snip = fig.add_subplot(snip_ax[1])
colidxs = -np.linspace(-1.25, -0.5, len(np.unique(labels[labels>=0])))
j=0
for c in np.unique(labels):
if c<0:
color='lightgrey'
else:
color = lighter(ccol, colidxs[j])
j=j+1
ax_raw_snip.plot(x, raw_snippets[labels==c].T, color=color, label='-1',
rasterized=True, alpha=0.25)
ax_normalized_snip.plot(x, normalized_snippets[labels==c].T, color=color, alpha=0.25)
ax_raw_snip.spines['top'].set_visible(False)
ax_raw_snip.spines['right'].set_visible(False)
ax_raw_snip.get_xaxis().set_ticklabels([])
ax_raw_snip.set_title('Raw snippets')
ax_raw_snip.set_ylabel('Amplitude [a.u.]')
ax_normalized_snip.spines['top'].set_visible(False)
ax_normalized_snip.spines['right'].set_visible(False)
ax_normalized_snip.set_title('Normalized snippets')
ax_normalized_snip.set_ylabel('Amplitude [a.u.]')
ax_normalized_snip.set_xlabel('Time [ms]')
ax_raw_snip.axis('off')
ax_normalized_snip.axis('off')
ax_overlay = fig.add_subplot(pc_ax[:,:])
ax_overlay.set_title('Features')
ax_overlay.axis('off')
for n in range(features.shape[1]):
for m in range(n):
ax = fig.add_subplot(pc_ax[n-1,m])
ax.scatter(features[labels==c,m], features[labels==c,n], marker='.',
color=color, alpha=0.25)
ax.set_xlim(np.min(features), np.max(features))
ax.set_ylim(np.min(features), np.max(features))
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if m==0:
ax.set_ylabel('PC %i'%(n+1))
if n==features.shape[1]-1:
ax.set_xlabel('PC %i'%(m+1))
ax = fig.add_subplot(pc_ax[0,features.shape[1]-2])
ax.set_xlim(np.min(features), np.max(features))
ax.set_ylim(np.min(features), np.max(features))
size = max(1, int(np.ceil(-np.log10(np.max(features)-np.min(features)))))
wbar = np.floor((np.max(features)-np.min(features))*10**size)/10**size
# should be smaller than the actual thing! so like x% of it?
xscalebar(ax, 0, 0, wbar, wformat='%%.%if'%size)
yscalebar(ax, 0, 0, wbar, hformat='%%.%if'%size)
ax.axis('off')
def plot_moving_fish(ws, dts, clusterss, ts, fishcounts, T, ignore_stepss):
"""Plot moving fish detection step.
Parameters
----------
ws : list of floats
Median width for each width cluster that the moving fish algorithm is computed on
(in seconds).
dts : list of floats
Sliding window size (in seconds) for each width cluster.
clusterss : list of 1D numpy int arrays
Cluster labels for each EOD cluster in a width cluster.
ts : list of 1D numpy float arrays
EOD emission times for each EOD in a width cluster.
fishcounts : list of lists
Sliding window timepoints and fishcounts for each width cluster.
T : float
Length of the analyzed recording in seconds.
ignore_stepss : list of 1D int arrays
Mask for fishcounts that were ignored (ignored if True) in the moving_fish analysis.
"""
fig = plt.figure()
# create gridspec
outer = gridspec.GridSpec(len(ws), 1)
for i, (w, dt, clusters, t, fishcount, ignore_steps) in enumerate(zip(ws, dts, clusterss, ts, fishcounts, ignore_stepss)):
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec = outer[i])
# axis for clusters
ax1 = fig.add_subplot(gs[0])
# axis for fishcount
ax2 = fig.add_subplot(gs[1])
# plot clusters as eventplot
for cnum, c in enumerate(np.unique(clusters[clusters>=0])):
ax1.eventplot(t[clusters==c], lineoffsets=cnum, linelengths=0.5, color=cmap(i))
cnum = cnum + 1
# Plot the sliding window
rect=Rectangle((0, -0.5), dt, cnum, linewidth=1, linestyle='--', edgecolor='k',
facecolor='none', clip_on=False)
ax1.add_patch(rect)
ax1.arrow(dt+0.1, -0.5, 0.5, 0, head_width=0.1, head_length=0.1, facecolor='k',
edgecolor='k')
# plot parameters
ax1.set_title(r'$\tilde{w}_%i = %.3f ms$'%(i, 1000*w))
ax1.set_ylabel('cluster #')
ax1.set_yticks(range(0, cnum))
ax1.set_xlabel('time')
ax1.set_xlim(0, T)
ax1.axes.xaxis.set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_visible(False)
# plot for fishcount
x = fishcount[0]
y = fishcount[1]
ax2 = fig.add_subplot(gs[1])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.axes.xaxis.set_visible(False)
yplot = np.copy(y)
ax2.plot(x+dt/2, yplot, linestyle='-', marker='.', c=cmap(i), alpha=0.25)
yplot[ignore_steps.astype(bool)] = np.NaN
ax2.plot(x+dt/2, yplot, linestyle='-', marker='.', c=cmap(i))
ax2.set_ylabel('Fish count')
ax2.set_yticks(range(int(np.min(y)), 1+int(np.max(y))))
ax2.set_xlim(0, T)
if i < len(ws)-1:
ax2.axes.xaxis.set_visible(False)
else:
ax2.axes.xaxis.set_visible(False)
xscalebar(ax2, 1, 0, 1, wunit='s', ha='right')
con = ConnectionPatch([0, -0.5], [dt/2, y[0]], "data", "data",
axesA=ax1, axesB=ax2, color='k')
ax2.add_artist(con)
con = ConnectionPatch([dt, -0.5], [dt/2, y[0]], "data", "data",
axesA=ax1, axesB=ax2, color='k')
ax2.add_artist(con)
plt.xlim(0, T)
| gpl-3.0 |
jerjorg/BZI | BZI/convergence.py | 1 | 6793 | import numpy as np
import matplotlib.pyplot as plt
import time
from BZI.symmetry import make_ptvecs
from BZI.sampling import make_grid
from BZI.pseudopots import Al_PP
from BZI.integration import monte_carlo
from BZI.plots import PlotMesh
class Convergence(object):
""" Compare integrations of pseudo-potentials by creating convergence plots.
Args:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cutoff (float): the energy cutoff of the pseudo-potential
cell_type (str): the geometry of the integration cell
cell_constant (float): the size of the integration cell
offset (list): a vector that offsets the grid from the origin and is
given in grid coordinates.
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
Attributes:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cell_type (str): the geometry of the integration cell.
cell_constant (float): the size of the integration cell.
cell_vectors (np.ndarray): an array vectors as columns of a 3x3 numpy
array that is used to create the cell
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
answer (float): the expected result of integration
errors (list): a list of errors for each grid type
nspts (list): a list of the number of sampling points for each grid type
integrals (list): a list of integral values for each grid type and constant
times (list): a list of the amount of time taken computing the grid
generation and integration.
"""
def __init__(self, pseudo_potential=None, cutoff=None, cell_centering=None,
cell_constants=None, cell_angles=None, offset=None,
grid_types=None, grid_constants=None,
integration_methods=None, origin=None, random = None):
self.pseudo_potential = pseudo_potential or Al_PP
self.cutoff = cutoff or 4.
self.cell_centering = cell_centering or "prim"
self.cell_constants = cell_constants or [1.]*3
self.cell_angles = cell_angles or [np.pi/2]*3
self.cell_vectors = make_ptvecs(self.cell_centering, self.cell_constants,
self.cell_angles)
self.grid_types = grid_types or ["prim", "base", "body", "face"]
self.grid_constants = grid_constants or [1/n for n in range(2,11)]
self.offset = offset or [0.,0.,0.]
# self.integration_methods = integration_methods or [rectangle_method]
self.origin = origin or [0.,0.,0.]
self.random = random or False
def compare_grids(self, answer, plot=False, save=False):
self.answer = answer
if self.random:
nm = len(self.grid_types)
self.nspts = [[] for _ in range(nm + 1)]
self.errors = [[] for _ in range(nm + 1)]
self.integrals = [[] for _ in range(nm + 1)]
self.times = [[] for _ in range(nm + 1)]
npts_list = [2**n for n in range(8,14)]
for npts in npts_list:
time1 = time.time()
integral = monte_carlo(self.pseudo_potential,
self.cell_vectors,
npts,
self.cutoff)
self.nspts[nm].append(npts)
self.integrals[nm].append(integral)
self.times[nm].append((time.time() - time1))
self.errors[nm].append(np.abs(self.integrals[nm][-1] - answer))
else:
self.nspts = [[] for _ in range(len(self.grid_types))]
self.errors = [[] for _ in range(len(self.grid_types))]
self.integrals = [[] for _ in range(len(self.grid_types))]
self.times = [[] for _ in range(len(self.grid_types))]
integration_method = self.integration_methods[0]
for (i,grid_centering) in enumerate(self.grid_centering_list):
for grid_consts in self.grid_constants_list:
for grid_angles in grid_angles_list:
grid_vecs = make_ptvecs(grid_centering, grid_consts, grid_angles)
time1 = time.time()
npts, integral = integration_method(self.pseudo_potential,
self.cell_vectors,
grid_vecs,
self.offset,
self.origin,
self.cutoff)
self.nspts[i].append(npts)
self.integrals[i].append(integral)
self.times[i].append((time.time() - time1))
self.errors[i].append(np.abs(self.integrals[i][-1] - answer))
if save:
np.save("%s_times" %self.pseudo_potential, self.times)
np.save("%s_integrals" %self.pseudo_potential, self.integrals)
np.save("%s_errors" %self.pseudo_potential, self.errors)
if plot:
if self.random:
plt.loglog(self.nspts[nm], self.errors[nm], label="random", color="orange")
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.errors[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Error")
test = [1./n**(2./3) for n in self.nspts[0]]
plt.loglog(self.nspts[0], test, label="1/n**(2/3)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.times[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Time (s)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
def plot_grid(self,i,j):
"""Plot one of the grids in the convergence plot.
"""
grid_vecs = make_ptvecs(self.grid_types[i], self.grid_constants[j])
grid_pts = make_grid(self.cell_vectors, grid_vecs, self.offset)
PlotMesh(grid_pts, self.cell_vectors, self.offset)
| gpl-3.0 |
chrsrds/scikit-learn | examples/manifold/plot_swissroll.py | 72 | 1295 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
perrette/pyglacier | pyglacier/plotting.py | 1 | 1106 | import matplotlib.pyplot as plt
#
# plotting
#
def plot_elevation(ds, ax=None):
if ax is None:
ax = plt.gca()
ds['hs'].plot(ax=ax,label="surface")
ds['hb'].plot(ax=ax,label="bottom")
# add horizontal line to indicate sea level
ax.hlines(0, ds.x[0], ds.x[-1], linestyle='dashed', color='black')
ds['zb'].plot(ax=ax, color='black', linewidth=2, label="bedrock") # add bedrock
ax.legend(frameon=False, loc="upper right")
def plot_velocity(ds, ax=None):
if ax is None:
ax = plt.gca()
ds = ds.copy()
u = 'u' if 'u' in ds else 'U'
ds[u] = ds[u]*3600*24
ds[u].plot(ax=ax)
ax.set_ylabel('velocity [m/d]')
def plot_glacier(ds):
fig,axes=plt.subplots(2,1,sharex=True)
ax=axes[0]
plot_elevation(ds, ax)
ax=axes[1]
plot_velocity(ds, ax)
ax.set_xlim([ds.x[0], ds.x[-1]])
return fig, axes
def plot_stress(ds):
_v = ["driving", "lat", "long", "basal", "residual"]
try:
ds = ds.take(_v)
except KeyError:
ds = ds.take([k + '_stress' for k in _v])
return ds.to_array(axis='stress').T.plot()
| mit |
samzhang111/wikipedia-jargon | all-subjects/make_tf_differences.py | 1 | 2815 | from __future__ import print_function
import msgpack
import sys
import os
from collections import defaultdict
from helpers import text_dict_to_term_dict
from WikiExtractor import clean, compact
import pandas as pd
def remove_wikipedia_markup(text):
return compact(clean(text.decode('utf8')))
def print_help_and_exit(msg=''):
if msg:
print('Error: {}\n'.format(msg))
print('Usage: python make_tf_differences.py [n-grams] [path to directory]')
print('The directory should contain files output by grab_texts.py')
sys.exit(1)
if len(sys.argv) <= 2:
print_help_and_exit()
##############################################################
# Read in msgpack files, separating them from simple and en Wikipedia
##############################################################
ngrams = int(sys.argv[1])
text_dir = sys.argv[2]
only = sys.argv[3:]
print('Only calculating for: ', only)
try:
files = os.listdir(text_dir)
except OSError:
print_help_and_exit()
##############################################################
# Organize the text files by subject, then wiki (en or simple)
##############################################################
file_dict = defaultdict(dict)
for f in files:
try:
subject, wiki, _ = f.split('_')
if only and subject not in only:
continue
file_dict[subject][wiki] = f
except ValueError:
print_help_and_exit('Text directory does not contain valid filenames')
for subject in file_dict:
print('Importing ', subject)
with open(os.path.join(text_dir, file_dict[subject]['en'])) as f:
en_text = msgpack.load(f)
en_text = {k: remove_wikipedia_markup(v) for k,v in en_text.items()}
with open(os.path.join(text_dir, file_dict[subject]['simple'])) as f:
sm_text = msgpack.load(f)
sm_text = {k: remove_wikipedia_markup(v) for k,v in sm_text.items()}
print('Calculating term differences')
en_tf, en_counts = text_dict_to_term_dict(en_text, ngrams)
sm_tf, sm_counts = text_dict_to_term_dict(sm_text, ngrams)
sm_terms = set(sm_tf)
en_terms = set(en_tf)
term_differences = {}
for t in sm_terms.union(en_terms):
term_differences[t] = en_tf[t] - sm_tf[t]
sorted_term_difference = sorted(term_differences.items(),
key=lambda x: x[1])
print('Outputting term differences')
td_df = pd.DataFrame(sorted_term_difference, columns=['term',
'term_difference'])
td_df['en_tf'] = td_df.term.apply(lambda x: en_tf[x])
td_df['sm_tf'] = td_df.term.apply(lambda x: sm_tf[x])
try:
os.mkdir('data/term-diffs/ngrams-{}'.format(ngrams))
except OSError:
pass
td_df.to_csv('data/term-diffs/ngrams-{}/{}_td.csv'.format(ngrams, subject),
index=False, encoding='utf8')
| gpl-3.0 |
ofgulban/scikit-image | doc/examples/filters/plot_rank_mean.py | 7 | 1525 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: uses all pixels belonging to the structuring element to
  compute the average gray level.
* **percentile mean**: only uses values between percentiles p0 and p1
  (here 10% and 90%).
* **bilateral mean**: only uses pixels of the structuring element whose gray
  level lies inside the interval [g - s0, g + s1] (here g-500 and g+500).
Here, the percentile mean and the usual mean give similar results: both
filters smooth the complete image (background and details). The bilateral
mean exhibits a high filtering rate for continuous areas (i.e. background)
while higher image frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = data.coins()
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 10),
sharex=True, sharey=True)
ax = axes.ravel()
titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
ax[n].imshow(imgs[n])
ax[n].set_title(titles[n])
ax[n].set_adjustable('box-forced')
ax[n].axis('off')
plt.show()
| bsd-3-clause |
CrazyGuo/vincent | examples/map_examples.py | 11 | 6721 | # -*- coding: utf-8 -*-
"""
Vincent Map Examples
"""
#Build a map from scratch
from vincent import *
world_topo = r'world-countries.topo.json'
state_topo = r'us_states.topo.json'
lake_topo = r'lakes_50m.topo.json'
county_geo = r'us_counties.geo.json'
county_topo = r'us_counties.topo.json'
or_topo = r'or_counties.topo.json'
vis = Visualization(width=960, height=500)
vis.data['countries'] = Data(
name='countries',
url=world_topo,
format={'type': 'topojson', 'feature': 'world-countries'}
)
geo_transform = Transform(
type='geopath', value="data", projection='winkel3', scale=200,
translate=[480, 250]
)
geo_from = MarkRef(data='countries', transform=[geo_transform])
enter_props = PropertySet(
stroke=ValueRef(value='#000000'),
path=ValueRef(field='path')
)
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark_props = MarkProperties(enter=enter_props, update=update_props)
vis.marks.append(
Mark(type='path', from_=geo_from, properties=mark_props)
)
vis.to_json('vega.json')
#Convenience Method
geo_data = [{'name': 'countries',
'url': world_topo,
'feature': 'world-countries'}]
vis = Map(geo_data=geo_data, scale=200)
vis.to_json('vega.json')
#States & Counties
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'},
{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}
]
vis = Map(geo_data=geo_data, scale=1000, projection='albersUsa')
del vis.marks[1].properties.update
vis.marks[0].properties.update.fill.value = '#084081'
vis.marks[1].properties.enter.stroke.value = '#fff'
vis.marks[0].properties.enter.stroke.value = '#7bccc4'
vis.to_json('vega.json')
#Choropleth
import json
import pandas as pd
#Map the county codes we have in our geometry to those in the
#county_data file, which contains additional rows we don't need
with open('us_counties.topo.json', 'r') as f:
get_id = json.load(f)
#A little FIPS code munging
new_geoms = []
for geom in get_id['objects']['us_counties.geo']['geometries']:
geom['properties']['FIPS'] = int(geom['properties']['FIPS'])
new_geoms.append(geom)
get_id['objects']['us_counties.geo']['geometries'] = new_geoms
with open('us_counties.topo.json', 'w') as f:
json.dump(get_id, f)
#Grab the FIPS codes and load them into a dataframe
geometries = get_id['objects']['us_counties.geo']['geometries']
county_codes = [x['properties']['FIPS'] for x in geometries]
county_df = pd.DataFrame({'FIPS': county_codes}, dtype=str)
county_df = county_df.astype(int)
#Read into Dataframe, cast to int for consistency
df = pd.read_csv('data/us_county_data.csv', na_values=[' '])
df['FIPS'] = df['FIPS'].astype(int)
#Perform an inner join, pad NA's with data from nearest county
merged = pd.merge(df, county_df, on='FIPS', how='inner')
merged = merged.fillna(method='pad')
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'}]
vis = Map(data=merged, geo_data=geo_data, scale=1100, projection='albersUsa',
data_bind='Employed_2011', data_key='FIPS',
map_key={'counties': 'properties.FIPS'})
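#Illustrative note: the choropleth join is key-by-key. Each 'FIPS' value in
#`merged` (data_key) is matched against 'properties.FIPS' in the counties
#TopoJSON (map_key), and the bound column 'Employed_2011' drives the color
#scale adjusted below.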
vis.marks[0].properties.enter.stroke_opacity = ValueRef(value=0.5)
#Change our domain for an even integer
vis.scales['color'].domain = [0, 189000]
vis.legend(title='Number Employed 2011')
vis.to_json('vega.json')
#Lets look at different stats
vis.rebind(column='Civilian_labor_force_2011', brew='BuPu')
vis.to_json('vega.json')
vis.rebind(column='Unemployed_2011', brew='PuBu')
vis.to_json('vega.json')
vis.rebind(column='Unemployment_rate_2011', brew='YlGnBu')
vis.to_json('vega.json')
vis.rebind(column='Median_Household_Income_2011', brew='RdPu')
vis.to_json('vega.json')
#Mapping US State Level Data
state_data = pd.read_csv('data/US_Unemployment_Oct2012.csv')
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = Map(data=state_data, geo_data=geo_data, scale=1000,
projection='albersUsa', data_bind='Unemployment', data_key='NAME',
map_key={'states': 'properties.NAME'})
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')
#Iterating State Level Data
yoy = pd.read_table('data/State_Unemp_YoY.txt', delim_whitespace=True)
#Standardize State names to match TopoJSON for keying
names = []
for row in yoy.iterrows():
pieces = row[1]['NAME'].split('_')
together = ' '.join(pieces)
names.append(together.title())
yoy['NAME'] = names
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = Map(data=yoy, geo_data=geo_data, scale=1000,
projection='albersUsa', data_bind='AUG_2012', data_key='NAME',
map_key={'states': 'properties.NAME'}, brew='YlGnBu')
#Custom threshold scale
vis.scales[0].type='threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')
#Rebind and set our scale again
vis.rebind(column='AUG_2013', brew='YlGnBu')
vis.scales[0].type='threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.to_json('vega.json')
vis.rebind(column='CHANGE', brew='YlGnBu')
vis.scales[0].type='threshold'
vis.scales[0].domain = [-1.5, -1.3, -1.1, 0, 0.1, 0.3, 0.5, 0.8]
vis.legends[0].title = "YoY Change in Unemployment (%)"
vis.to_json('vega.json')
#Oregon County-level population data
or_data = pd.read_table('data/OR_County_Data.txt', delim_whitespace=True)
or_data['July_2012_Pop']= or_data['July_2012_Pop'].astype(int)
#Standardize keys
with open('or_counties.topo.json', 'r') as f:
counties = json.load(f)
def split_county(name):
parts = name.split(' ')
parts.pop(-1)
return ''.join(parts).upper()
#A little FIPS code munging
new_geoms = []
for geom in counties['objects']['or_counties.geo']['geometries']:
geom['properties']['COUNTY'] = split_county(geom['properties']['COUNTY'])
new_geoms.append(geom)
counties['objects']['or_counties.geo']['geometries'] = new_geoms
with open('or_counties.topo.json', 'w') as f:
json.dump(counties, f)
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'},
{'name': 'or_counties',
'url': or_topo,
'feature': 'or_counties.geo'}]
vis = Map(data=or_data, geo_data=geo_data, scale=3700,
translate=[1480, 830],
projection='albersUsa', data_bind='July_2012_Pop', data_key='NAME',
map_key={'or_counties': 'properties.COUNTY'})
vis.marks[0].properties.update.fill.value = '#c2c2c2'
vis.to_json('vega.json')
| mit |
sinhrks/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
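# Equivalently (illustrative note): k(x, y) = 2*x1*y1 + x2*y2, i.e. a linear
# kernel that weights the first iris feature (sepal length) twice as much as
# the second (sepal width).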
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
cadrev/Titanic-Prediction | data-munging.py | 1 | 3412 | #
# Title : Data munging(AKA cleaning) the Titanic Data
# Author : Felan Carlo Garcia
#
# Notes:
# -- Code is based on the Kaggle Python Tutorial
# -- data cleaning prior to implementing a machine learning algorithm.
import numpy as np
import pandas as pd
def processdata(filename, outputname):
df = pd.read_csv(filename,header=0)
# Make a new column 'Gender' and EmbarkedNum to convert the string
# information into an integer value.
# We do this because general machine learning algorithms do not
# work on string values.
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
df['EmbarkedNum'] = df['Embarked'].map({'S': 0, 'C': 1, 'Q': 1}).astype(int)
# Executing the code:
# --print df[df['Age'].isnull()]['Sex']-- shows that the Titanic data contains
# some null values of the ages of the passengers.
# In this case, we can either drop the row or we can assign an arbitrary
# value to fill the missing data.
# For this code, arbitrary age data is obtained by using the median
# age data of the passengers. We make a new column 'AgeFill' and place
# the median data on the missing values instead of directly modifying
# the 'Age' column
df['AgeFill'] = df['Age']
for i in range(0, 2):
for j in range(0, 3):
median = df[(df['Gender'] == i) & (df['Pclass'] == j+1)]['Age'].dropna().median()
df.loc[ (df.Age.isnull()) & (df.Gender == i) & (df.Pclass == j+1),'AgeFill'] = median
# We add a new column 'AgeIsNull' to know which records has a missing
# values previously.
# We then interpolate the missing values from the 'Fare' column.
df['AgeIsNull'] = pd.isnull(df.Age).astype(int)
df['Fare'] = df['Fare'].interpolate()
# ------------- Feature Engineering Part --------------------
# Feature Engineering is the process of using domain/expert
# knowledge of the data to create features that make machine
# learning algorithms work better.
#
# In this case, studying the data shows that women and children
# have higher survival rates compared to men. Thus we add
# two additional features: 'Female' and 'Children', in an attempt
# to assist our learning model in its prediction.
# At the same time we add features Age*Class and FamilySize
# as additional engineered feature that may help our learning
# model
df['Children'] = df['AgeFill'].map(lambda x: 1 if x < 6.0 else 0)
df['Female'] = df['Gender'].map(lambda x: 1 if x == 0 else 0)
df['FamilySize'] = df['SibSp'] + df['Parch']
df['Age*Class'] = df['AgeFill'] * df['Pclass']
# Since most machine learning algorithms don't work on strings,
# we drop the columns in our pandas dataframe containing object
# datatypes.
# The code:
# --print df.dtypes[df.dtypes.map(lambda x: x=='object')]--
# will show which columns are made of object datatypes.
#
# In this case these are the following columns containing
# object.string:
# Age, Name, Sex, Ticket, Cabin, Embarked, Fare
#
# We drop the following objects columns along with the other data
# since they wont likely contribute to our machine learning
# prediction
df = df.drop(['Age','Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
df.to_csv(outputname, sep=',', index=False)
return df
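# Illustrative sketch (not part of the original script): once the frame is
# fully numeric it can feed a scikit-learn estimator. This assumes the input
# is the Kaggle training set (so a 'Survived' column exists); the model type
# and parameters are arbitrary.
def demo_fit(df):
    from sklearn.ensemble import RandomForestClassifier
    features = df.drop(['Survived', 'PassengerId'], axis=1).values
    target = df['Survived'].values
    clf = RandomForestClassifier(n_estimators=100)
    return clf.fit(features, target)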
def main():
print processdata('titanic-data-shuffled.csv', 'final-data.csv')
if __name__ == '__main__':
main()
| mit |
IsCoolEntertainment/debpkg_python-pyzmq | examples/bench/plot_latency.py | 12 | 2229 | """Plot latency data from messaging benchmarks.
To generate the data for each library, I started the server and then did
the following for each client::
from xmlrpc_client import client
for i in range(9):
s = '0'*10**i
print s
%timeit client.echo(s)
"""
from matplotlib.pylab import *
rawdata = """# Data in milliseconds
Bytes JSONRPC PYRO XMLRPC pyzmq_copy pyzmq_nocopy
1 2.15 0.186 2.07 0.111 0.136
10 2.49 0.187 1.87 0.115 0.137
100 2.5 0.189 1.9 0.126 0.138
1000 2.54 0.196 1.91 0.129 0.141
10000 2.91 0.271 2.77 0.204 0.197
100000 6.65 1.44 9.17 0.961 0.546
1000000 50.2 15.8 81.5 8.39 2.25
10000000 491 159 816 91.7 25.2
100000000 5010 1560 8300 893 248
"""
with open('latency.csv','w') as f:
f.writelines(rawdata)
data = csv2rec('latency.csv',delimiter='\t')
loglog(data.bytes, data.xmlrpc*1000, label='XMLRPC')
loglog(data.bytes, data.jsonrpc*1000, label='JSONRPC')
loglog(data.bytes, data.pyro*1000, label='Pyro')
loglog(data.bytes, data.pyzmq_nocopy*1000, label='PyZMQ')
loglog(data.bytes, len(data.bytes)*[60], label='Ping')
legend(loc=2)
title('Latency')
xlabel('Number of bytes')
ylabel('Round trip latency ($\mu s$)')
grid(True)
show()
savefig('latency.png')
clf()
semilogx(data.bytes, 1000/data.xmlrpc, label='XMLRPC')
semilogx(data.bytes, 1000/data.jsonrpc, label='JSONRPC')
semilogx(data.bytes, 1000/data.pyro, label='Pyro')
semilogx(data.bytes, 1000/data.pyzmq_nocopy, label='PyZMQ')
legend(loc=1)
xlabel('Number of bytes')
ylabel('Message/s')
title('Message Throughput')
grid(True)
show()
savefig('msgs_sec.png')
clf()
loglog(data.bytes, 1000/data.xmlrpc, label='XMLRPC')
loglog(data.bytes, 1000/data.jsonrpc, label='JSONRPC')
loglog(data.bytes, 1000/data.pyro, label='Pyro')
loglog(data.bytes, 1000/data.pyzmq_nocopy, label='PyZMQ')
legend(loc=3)
xlabel('Number of bytes')
ylabel('Message/s')
title('Message Throughput')
grid(True)
show()
savefig('msgs_sec_log.png')
clf()
semilogx(data.bytes, data.pyro/data.pyzmq_nocopy, label="No-copy")
semilogx(data.bytes, data.pyro/data.pyzmq_copy, label="Copy")
xlabel('Number of bytes')
ylabel('Ratio throughputs')
title('PyZMQ Throughput/Pyro Throughput')
grid(True)
legend(loc=2)
show()
savefig('msgs_sec_ratio.png')
| lgpl-3.0 |
toobaz/pandas | pandas/tests/indexes/datetimes/test_ops.py | 2 | 18288 | from datetime import datetime
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCDateOffset
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
PeriodIndex,
Series,
Timestamp,
bdate_range,
date_range,
)
from pandas.tests.test_base import Ops
import pandas.util.testing as tm
from pandas.tseries.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Series' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.format(op)):
getattr(self.dt_series, op)
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Series' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
index = pd.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = pd.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = pd.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = pd.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"minute",
"second",
"millisecond",
"microsecond",
],
):
idx = pd.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_value_counts_unique(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = pd.date_range("2011-01-01 09:00", freq="H", periods=10)
        # create repeated values: the n-th element is repeated n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
pd.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[pd.NaT, "2011-01-03", "2011-01-05", "2011-01-02", pd.NaT],
[pd.NaT, pd.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep="last")
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep="last")
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = pd.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific")
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.astype(object))
assert not idx.astype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx.freq = "foo"
def test_offset_deprecated(self):
# GH 20716
idx = pd.DatetimeIndex(["20180101", "20180102"])
# getter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=BDay())
assert shifted[0] == rng[0] + BDay()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
assert t1.identical(t2)
# name
t1 = t1.rename("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.rename("foo")
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END, freq="C")
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_copy(self):
cp = self.rng.copy()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shift(self):
shifted = self.rng.shift(5)
assert shifted[0] == self.rng[5]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(-5)
assert shifted[5] == self.rng[0]
assert shifted.freq == self.rng.freq
shifted = self.rng.shift(0)
assert shifted[0] == self.rng[0]
assert shifted.freq == self.rng.freq
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", pd.errors.PerformanceWarning)
rng = date_range(START, END, freq=BMonthEnd())
shifted = rng.shift(1, freq=CDay())
assert shifted[0] == rng[0] + CDay()
def test_shift_periods(self):
# GH#22458 : argument 'n' was deprecated in favor of 'periods'
idx = pd.date_range(start=START, end=END, periods=3)
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=True):
tm.assert_index_equal(idx.shift(n=0), idx)
def test_pickle_unpickle(self):
unpickled = tm.round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_equals(self):
assert not self.rng.equals(list(self.rng))
| bsd-3-clause |
piyueh/PetIBM | examples/api_examples/oscillatingcylinder2dRe100_GPU/scripts/plotDragCoefficient.py | 2 | 2057 | """
Plot the drag coefficient over 4 oscillation cycles.
"""
import pathlib
import numpy
from matplotlib import pyplot
from scipy import signal
# Read the drag force from file.
simu_dir = pathlib.Path(__file__).parents[1]
data_dir = simu_dir / 'output'
filepath = data_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
t, fx = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, usecols=(0, 1))
# Set the parameters of the kinematics.
KC = 5.0 # Keulegan-Carpenter number
D = 1.0 # cylinder diameter
f = 0.2 # frequency of oscillation
w = 2 * numpy.pi * f # angular frequency
Am = KC * D / (2 * numpy.pi) # amplitude of oscillation
rho = 1.0 # fluid density
Um = w * Am # maximum translational velocity of cylinder
V = numpy.pi * D**2 / 4 # volume of cylinder
# Add force due to body acceleration.
ax = w**2 * Am * numpy.sin(w * t)
fx += rho * V * ax
# Get the drag coefficient.
cd = fx / (0.5 * rho * Um**2 * D)
# Compute and print info about extrema of the drag coefficient.
idx_min = signal.argrelextrema(fx, numpy.less_equal, order=100)[0][1:-1]
t_min = t[idx_min]
print('Non-dimensional time-interval between minima:\n\t{}'
.format(f * (t_min[1:] - t_min[:-1])))
cd_min = cd[idx_min]
print('Drag coefficient valleys: {}'.format(cd_min))
idx_max = signal.argrelextrema(fx, numpy.greater_equal, order=100)[0][1:]
t_max = t[idx_max]
print('Non-dimensional time-interval between maxima:\n\t{}'
.format(f * (t_max[1:] - t_max[:-1])))
cd_max = cd[idx_max]
print('Drag coefficient peaks: {}'.format(cd_max))
# Plot the drag coefficient over the 4 cycles.
pyplot.rcParams['font.size'] = 16
pyplot.rcParams['font.family'] = 'serif'
fig, ax = pyplot.subplots(figsize=(8.0, 4.0))
ax.grid()
ax.set_xlabel('$t / T$')
ax.set_ylabel('$C_D$')
ax.plot(f * t, cd)
ax.axis((0.0, 4.0, -6.0, 6.0))
fig.tight_layout()
pyplot.show()
# Save the figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'dragCoefficient.png'
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
lammy/artisan | setup-mac3.py | 9 | 8968 | """
This is a setup.py script generated by py2applet
Usage:
python3 setup-mac3.py py2app
"""
# manually remove sample-data mpl subdirectory from Python installation:
# sudo rm -rf /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/matplotlib/mpl-data/sample_data
from distutils import sysconfig
their_parse_makefile = sysconfig.parse_makefile
def my_parse_makefile(filename, g):
their_parse_makefile(filename, g)
g['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
sysconfig.parse_makefile = my_parse_makefile
import sys, os
from setuptools import setup
import string
from plistlib import Plist
import artisanlib
# current version of Artisan
VERSION = artisanlib.__version__
LICENSE = 'GNU General Public License (GPL)'
QTDIR = r'/Developer/Applications/Qt/'
APP = ['artisan.py']
DATA_FILES = [
"LICENSE.txt",
("../Resources/qt_plugins/iconengines", [QTDIR + r'/plugins/iconengines/libqsvgicon.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqsvg.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqjpeg.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqgif.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqtiff.dylib']),
# standard QT translation needed to get the Application menu bar and
# the standard dialog elements translated
("../translations", [QTDIR + r'/translations/qt_de.qm']),
("../translations", [QTDIR + r'/translations/qt_es.qm']),
("../translations", [QTDIR + r'/translations/qt_fr.qm']),
("../translations", [QTDIR + r'/translations/qt_sv.qm']),
("../translations", [QTDIR + r'/translations/qt_zh_CN.qm']),
("../translations", [QTDIR + r'/translations/qt_zh_TW.qm']),
("../translations", [QTDIR + r'/translations/qt_ko.qm']),
("../translations", [QTDIR + r'/translations/qt_pt.qm']),
("../translations", [QTDIR + r'/translations/qt_ru.qm']),
("../translations", [QTDIR + r'/translations/qt_ar.qm']),
("../translations", [QTDIR + r'/translations/qt_ja.qm']),
("../translations", [QTDIR + r'/translations/qt_hu.qm']),
("../translations", [r"translations/artisan_de.qm"]),
("../translations", [r"translations/artisan_es.qm"]),
("../translations", [r"translations/artisan_fr.qm"]),
("../translations", [r"translations/artisan_sv.qm"]),
("../translations", [r'translations/artisan_zh_CN.qm']),
("../translations", [r'translations/artisan_zh_TW.qm']),
("../translations", [r'translations/artisan_ko.qm']),
("../translations", [r'translations/artisan_pt.qm']),
("../translations", [r'translations/artisan_ru.qm']),
("../translations", [r'translations/artisan_ar.qm']),
("../translations", [r"translations/artisan_it.qm"]),
("../translations", [r"translations/artisan_el.qm"]),
("../translations", [r"translations/artisan_no.qm"]),
("../translations", [r"translations/artisan_nl.qm"]),
("../translations", [r"translations/artisan_fi.qm"]),
("../translations", [r"translations/artisan_tr.qm"]),
("../translations", [r"translations/artisan_ja.qm"]),
("../translations", [r"translations/artisan_hu.qm"]),
("../translations", [r"translations/artisan_he.qm"]),
("../Resources", [r"qt.conf"]),
("../Resources", [r"artisanProfile.icns"]),
("../Resources", [r"artisanAlarms.icns"]),
("../Resources", [r"artisanPalettes.icns"]),
("../Resources", [r"artisanWheel.icns"]),
("../Resources", [r"includes/Humor-Sans.ttf"]),
]
plist = Plist.fromFile('Info3.plist')
plist.update({ 'CFBundleDisplayName': 'Artisan',
'CFBundleGetInfoString': 'Artisan, Roast Logger',
'CFBundleIdentifier': 'com.google.code.p.Artisan',
'CFBundleShortVersionString': VERSION,
'CFBundleVersion': 'Artisan ' + VERSION,
'LSMinimumSystemVersion': '10.6',
'LSMultipleInstancesProhibited': 'false',
'LSPrefersPPC': False,
'LSArchitecturePriority': 'x86_64',
'NSHumanReadableCopyright': LICENSE,
})
OPTIONS = {
'strip':True,
    'argv_emulation': False, # this would confuse GUI processing
'semi_standalone': False,
'site_packages': True,
'dylib_excludes': ['phonon','QtDBus','QtDeclarative','QtDesigner',
'QtHelp','QtMultimedia','QtNetwork',
'QtOpenGL','QtScript','QtScriptTools',
'QtSql','QtTest','QtXmlPatterns','QtWebKit'],
# 'packages': ['matplotlib'], # with this the full pkg is copied to Resources/lib/python3.4
'packages': ['yoctopuce'],
'optimize': 2,
'compressed': True,
'iconfile': 'artisan.icns',
'arch': 'x86_64',
'matplotlib_backends': '-', # '-' for only-imported or explicit 'qt4agg'; without this the full pkg is copied to Resources/lib/python3.4
'includes': ['serial',
'PyQt4',
'PyQt4.QtCore',
'PyQt4.QtGui',
'PyQt4.QtSvg',
'PyQt4.QtXml'],
'excludes' : ['_tkagg','_ps','_fltkagg','Tkinter','Tkconstants',
'_agg','_cairo','_gtk','gtkcairo','pydoc','sqlite3',
'bsddb','curses','tcl',
'_wxagg','_gtagg','_cocoaagg','_wx'],
'plist' : plist}
setup(
name='Artisan',
version=VERSION,
author='YOUcouldbeTOO',
author_email='zaub.ERASE.org@yahoo.com',
license=LICENSE,
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app']
)
os.system(r'cp README.txt dist')
os.system(r'cp LICENSE.txt dist')
os.system(r'mkdir dist/Wheels')
os.system(r'mkdir dist/Wheels/Cupping')
os.system(r'mkdir dist/Wheels/Other')
os.system(r'mkdir dist/Wheels/Roasting')
os.system(r'cp Wheels/Cupping/* dist/Wheels/Cupping')
os.system(r'cp Wheels/Other/* dist/Wheels/Other')
os.system(r'cp Wheels/Roasting/* dist/Wheels/Roasting')
os.chdir('./dist')
# to prevent the error "Artisan.app/Contents/Resources/lib/python3.3/config-3.3m/Makefile'" on startup
# generated by v0.8 of py2app:
#os.system(r'cp /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/config-3.3m/Makefile ./Artisan.app/Contents/Resources/lib/python3.3/config-3.3m/')
# delete unused Qt.framework files (py2app exclude does not seem to work)
print('*** Removing unused Qt frameworks ***')
for fw in [
'phonon',
'QtDeclarative',
'QtHelp',
'QtMultimedia',
'QtNetwork',
'QtOpenGL',
'QtScript',
'QtScriptTools',
'QtSql',
'QtTest',
'QtWebKit',
'QtXMLPatterns']:
for root,dirs,files in os.walk('./Artisan.app/Contents/Frameworks/' + fw + ".framework"):
for file in files:
print('Deleting', file)
os.remove(os.path.join(root,file))
print('*** Removing Qt debug libs ***')
for root, dirs, files in os.walk('.'):
for file in files:
if 'debug' in file:
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.startswith('test_'):
print('Deleting', file)
os.remove(os.path.join(root,file))
elif '_tests' in file:
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.pyc') and file != "site.pyc":
print('Deleting', file)
os.remove(os.path.join(root,file))
# remove also all .h .in .cpp .cc .html files
elif file.endswith('.h') and file != "pyconfig.h":
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.in'):
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.cpp'):
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.cc'):
print('Deleting', file)
os.remove(os.path.join(root,file))
# .afm files should not be removed as without matplotlib will fail on startup
# elif file.endswith('.afm'):
# print('Deleting', file)
# os.remove(os.path.join(root,file))
# remove test files
for dir in dirs:
if 'tests' in dir:
for r,d,f in os.walk(os.path.join(root,dir)):
for fl in f:
print('Deleting', os.path.join(r,fl))
os.remove(os.path.join(r,fl))
os.chdir('..')
os.system(r"rm artisan-mac-" + VERSION + r".dmg")
os.system(r'hdiutil create artisan-mac-' + VERSION + r'.dmg -volname "Artisan" -fs HFS+ -srcfolder "dist"')
# otool -L dist/Artisan.app/Contents/MacOS/Artisan
| gpl-3.0 |
arjoly/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
matthew-tucker/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 19 | 1929 | """
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/utils/deprecation.py | 36 | 2418 | import warnings
__all__ = ["deprecated", ]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, use
    an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecation.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
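    A class can be decorated in the same way (the message below is purely
    illustrative):

    >>> @deprecated("use another estimator instead")
    ... class SomeClass(object): pass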
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra : string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
| bsd-3-clause |
haisland0909/Denoising-Dirty-Documents | script/prediction.py | 1 | 5057 | # coding: UTF8
from sklearn.pipeline import FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import sklearn.linear_model
import img_to_pickle as i_p
import features as f
import classify
import preprocessing as pre
import pickle
import numpy as np
import pandas as pd
import datetime
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
SUBMISSION_DIR = ROOT.replace("script", "tmp/submission")
clf_dict = {
'LR': {
"name": 'L2 Logistic Regression',
"clf": sklearn.linear_model.LogisticRegression(penalty='l2', dual=False, C=0.01),
},
'GB2': {
"name": 'Gradient Boosting New',
"clf": GradientBoostingRegressor(random_state=1, learning_rate=0.05,
n_estimators=3000, subsample=0.8,
max_features=0.3, min_samples_split=2,
min_samples_leaf=1, max_depth=7)
},
"RF": {
"name": "RandomForest",
"clf": RandomForestRegressor(max_depth=7, max_features=0.4,
min_samples_leaf=10, min_samples_split=2,
n_jobs=-1, n_estimators=1000)
},
'SGDR': {
"name": 'SGD Regression',
"clf": sklearn.linear_model.SGDRegressor(penalty='l2'),
}
}
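# Clip a predicted pixel value into the valid [0, 1] intensity range.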
def zero_one(x):
return min(max(x, 0.), 1.)
def convert_testdata(test_gray_data):
data_df = f.make_test_df(test_gray_data)
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
X_test = fu.fit_transform(data_df)
#X_test = Std.fit_transform(X_test)
return X_test
def convert_traindata(train_gray_data, labels):
data_df = f.make_data_df(train_gray_data, labels)
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
Std = preprocessing.StandardScaler()
X_train = fu.fit_transform(data_df)
y_train = np.concatenate(data_df["label"].apply(lambda x: x.flatten()))
X_train = Std.fit_transform(X_train)
return X_train, y_train
def prediction(clf_name):
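    """Fit the chosen regressor on the training images, pickle the fitted
    model and write a pixel-level Kaggle submission CSV."""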
print "****************classifier****************"
print clf_dict[clf_name]["clf"]
clf = clf_dict[clf_name]["clf"]
_, _, _, train_gray_data, test_gray_data, _, labels = i_p.load_data()
train_keys = train_gray_data.keys()
test_keys = test_gray_data.keys()
train_df = f.make_data_df(train_gray_data, labels)
test_df = f.make_test_df(test_gray_data)
train_df = train_df.reset_index()
test_df = test_df.reset_index()
train_df.columns = ["pngname", "input", "label"]
test_df.columns = ["pngname", "input"]
# operation check
    if clf_name == "SGDR":
# train_df, train_keys, test_df, test_keys = pre.make_checkdata(mode="df")
# train_df, train_keys, _, _ = pre.make_checkdata(mode="df")
for i in xrange(len(train_keys)):
train_X, train_y = classify.set_traindata(train_df, train_keys[i])
clf.partial_fit(train_X, train_y)
else:
# operation check
# train_df, train_keys, _, _ = pre.make_checkdata(mode="df")
fu = FeatureUnion(transformer_list=f.feature_transformer_rule)
train_X = fu.fit_transform(train_df)
train_y = np.concatenate(train_df["label"].apply(lambda x: x.flatten()))
train_X, train_y = classify.downsampling_data(train_X, train_y, 0.2)
clf.fit(train_X, train_y)
clf_dir = os.path.abspath(os.path.dirname(__file__)) +\
"/../tmp/fit_instance/"
now = datetime.datetime.now()
savefile = clf_dir + clf_name + now.strftime("%Y_%m_%d_%H_%M_%S") + ".pickle"
fi = open(savefile, "w")
pickle.dump(clf, fi)
fi.close()
for i in xrange(len(test_keys)):
test_img = test_df[(test_df["pngname"] == test_keys[i])]["input"].as_matrix()[0]
imgname = test_keys[i]
shape = test_img.shape
test_img = {test_keys[i]: test_img}
X_test = convert_testdata(test_img)
output = clf.predict(X_test)
output = np.asarray(output)
zo = np.vectorize(zero_one)
output = zo(output).reshape(shape)
tmp = []
for row in xrange(len(output)):
for column in xrange(len(output[row])):
id_ = imgname + "_" + str(row + 1) + "_" + str(column + 1)
value = output[row][column]
pix = [id_, value]
tmp.append(pix)
if i == 0:
predict_df = pd.DataFrame(tmp)
else:
tmp_df = pd.DataFrame(tmp)
predict_df = pd.concat([predict_df, tmp_df])
predict_df.columns = ["id", "value"]
now = datetime.datetime.now()
submission_path = SUBMISSION_DIR + "/submission_" + now.strftime("%Y_%m_%d_%H_%M_%S") + ".csv"
predict_df.to_csv(submission_path, header=True, index=False)
if __name__ == '__main__':
clf_name = "RF"
prediction(clf_name)
| apache-2.0 |
skudriashev/incubator-airflow | setup.py | 2 | 9592 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
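# Illustrative return values (shas shortened): '.release:1.8.2+4d0a63c' for a
# clean checkout of a release tag, '.dev0+4d0a63c.dirty' when uncommitted
# changes are present, and '' when gitpython is not installed.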
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
sendgrid = ['sendgrid>=5.2.0']
celery = [
'celery>=4.0.0',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
'google-cloud-dataflow',
'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=1.1.1']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
ssh = ['paramiko>=2.1.1']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.6']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'rednose',
'paramiko',
'requests_mock'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker + ssh
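# Each of the lists above is exposed as a pip extra via ``extras_require`` in
# do_setup(), e.g. ``pip install apache-airflow[celery,postgres]`` (extras
# chosen purely for illustration).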
def do_setup():
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach==2.0.0',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.14',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'azure': azure,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'sendgrid' : sendgrid,
'slack': slack,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
'redis': redis,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.incubator.apache.org',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
fbagirov/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
sjsrey/pysal | pysal/lib/common.py | 4 | 3385 | import copy
import sys
import time
# external imports
import numpy as np
import numpy.linalg as la
import scipy as sp
import scipy.stats as stats
from libpysal.cg.kdtree import KDTree
from scipy.spatial.distance import pdist, cdist
import pandas
try:
from patsy import PatsyError
except ImportError:
PatsyError = Exception
RTOL = .00001
ATOL = 1e-7
MISSINGVALUE = None
######################
# Decorators/Utils #
######################
# import numba.jit OR create mimic decorator and set existence flag
try:
from numba import jit
HAS_JIT = True
except ImportError:
def jit(function=None, **kwargs):
"""Mimic numba.jit() with synthetic wrapper
"""
if function is not None:
def wrapped(*original_args, **original_kw):
"""Case 1 - structure of a standard decorator
i.e., jit(function)(*args, **kwargs)
"""
return function(*original_args, **original_kw)
return wrapped
else:
def partial_inner(func):
"""Case 2 - returns Case 1
i.e., jit()(function)(*args, **kwargs)
"""
return jit(func)
return partial_inner
HAS_JIT = False
def simport(modname):
"""
Safely import a module without raising an error.
Parameters
-----------
modname : str
module name needed to import
Returns
--------
tuple of (True, Module) or (False, None) depending on whether the import
succeeded.
Notes
------
Wrapping this function around an iterative context or a with context would
allow the module to be used without necessarily attaching it permanently in
the global namespace:
for t,mod in simport('pandas'):
if t:
mod.DataFrame()
else:
#do alternative behavior here
del mod #or don't del, your call
instead of:
t, mod = simport('pandas')
if t:
mod.DataFrame()
else:
#do alternative behavior here
    The first idiom makes it work kind of like a with statement.
"""
try:
exec('import {}'.format(modname))
return True, eval(modname)
except:
return False, None
def requires(*args, **kwargs):
"""
Decorator to wrap functions with extra dependencies:
Arguments
---------
args : list
list of strings containing module to import
verbose : bool
boolean describing whether to print a warning message on import
failure
Returns
-------
    The original function if all modules in args are importable; otherwise a
    function that simply passes.
"""
v = kwargs.pop('verbose', True)
wanted = copy.deepcopy(args)
def inner(function):
available = [simport(arg)[0] for arg in args]
if all(available):
return function
else:
def passer(*args,**kwargs):
if v:
missing = [arg for i, arg in enumerate(wanted) if not available[i]]
print(('missing dependencies: {d}'.format(d=missing)))
print(('not running {}'.format(function.__name__)))
else:
pass
return passer
return inner
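# Example usage of ``requires`` (the decorated function is hypothetical):
#
#     @requires('pandas')
#     def records_to_frame(records):
#         import pandas
#         return pandas.DataFrame(records)
#
# If pandas is not importable, calling records_to_frame() only prints the
# missing dependency and the skipped function name, then returns None.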
| bsd-3-clause |
JosmanPS/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but the computational cost can
be high, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
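            # One propagation step: spread the current label distributions
            # through the graph, then clamp -- labeled rows add back
            # (1 - alpha) of their original one-hot labels while unlabeled
            # rows retain alpha of the propagated mass.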
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
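    # (added commentary, not part of the original scikit-learn source) Up to
    # the handling of self-loops and isolated nodes inside `graph_laplacian`,
    # the matrix returned above is the symmetrically normalised affinity
    # D^{-1/2} W D^{-1/2} with its diagonal forced to zero. A rough dense
    # NumPy sketch of the same quantity (`affinity` below stands for any
    # dense affinity matrix and is not a name used in this module):
    #
    #     import numpy as np
    #     W = affinity.copy()
    #     np.fill_diagonal(W, 0.0)          # ignore self-similarities
    #     d = W.sum(axis=0)
    #     S = W / np.sqrt(np.outer(d, d))   # zero diagonal by construction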
| bsd-3-clause |
kedz/cuttsum | old/python/cuttsum/readers.py | 1 | 2494 | import codecs
import numpy as np
from sklearn.feature_extraction import DictVectorizer
def gold_reader(bow_file, l_file, sim_idx, vector=u'latent'):
op = codecs.open
sims = []
vectors = []
labels = []
unicodes = []
last_hour = None
with op(bow_file, u'r', u'utf-8') as bf, op(l_file, u'r', u'utf-8') as lf:
header = lf.readline().strip()
b_line = bf.readline()
l_line = lf.readline()
while b_line and l_line:
b_datum = b_line.strip().split(u'\t')
b_hour, b_stream_id, b_sent_id, b_unicode = b_datum[0:4]
bow = {x:1 for x in b_datum[4].split(u' ')}
l_datum = l_line.strip().split(u'\t')
l_hour, l_stream_id, l_sent_id = l_datum[0:3]
sim = float(l_datum[sim_idx])
lvec = [float(x) for x in l_datum[6:]]
b_label = (b_hour, b_stream_id, b_sent_id)
l_label = (l_hour, l_stream_id, l_sent_id)
assert b_label == l_label
if b_hour != last_hour:
if last_hour is not None:
n_points = len(sims)
sims = np.array(sims)
if vector == u'latent':
vectors = np.array(vectors)
elif vector == u'bow':
vctr = DictVectorizer()
vectors = vctr.fit_transform(vectors)
unicodes = np.array(unicodes, dtype=(unicode, 1000))
yield (last_hour, labels, unicodes, sims, vectors)
sims = []
vectors = []
labels = []
unicodes = []
last_hour = b_hour
sims.append(sim)
if vector == u'latent':
vectors.append(lvec)
elif vector == u'bow':
vectors.append(bow)
labels.append(b_label)
unicodes.append(b_unicode)
b_line = bf.readline()
l_line = lf.readline()
if len(vectors) > 0:
n_points = len(sims)
sims = np.array(sims)
if vector == u'latent':
vectors = np.array(vectors)
elif vector == u'bow':
vctr = DictVectorizer()
vectors = vctr.fit_transform(vectors)
unicodes = np.array(unicodes, dtype=(unicode, 1000))
yield (last_hour, labels, unicodes, sims, vectors)
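# (added usage sketch, not part of the original module; the file paths and the
# similarity column index below are placeholders) `gold_reader` is a generator
# yielding one batch per hour of the stream:
#
#     for hour, labels, texts, sims, vectors in gold_reader(
#             u'bow.tsv', u'lab.tsv', sim_idx=3, vector=u'latent'):
#         print hour, vectors.shape, sims.mean()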
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_console_config.py | 24 | 21691 | # Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64 encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are the options for the command.
# Raw image data
# is given as STDIN to the program.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporally file and the program is called with the temporally file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
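# For instance (an illustrative, commented-out setting rather than a default of
# this profile; the 'open' viewer command is an assumption), the 'tempfile'
# handler described above could be wired up as:
# c.ZMQTerminalInteractiveShell.image_handler = 'tempfile'
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = ['open', '{file}']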
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are the options for the command.
# You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
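# For instance (illustrative only, mirroring the msgpack example given in the
# Session notes above; assumes the msgpack package is installed):
# c.Session.packer = 'msgpack.packb'
# c.Session.unpacker = 'msgpack.unpackb'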
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| mit |
wdurhamh/statsmodels | statsmodels/tsa/arima_model.py | 9 | 77514 | # Note: The information criteria add 1 to the number of parameters
# whenever the model has an AR or MA term since, in principle,
# the variance could be treated as a free parameter and restricted
# This code does not allow this, but it adds consistency with other
# packages such as gretl and X12-ARIMA
from __future__ import absolute_import
from statsmodels.compat.python import string_types, range
# for 2to3 with extensions
from datetime import datetime
import numpy as np
from scipy import optimize
from scipy.stats import t, norm
from scipy.signal import lfilter
from numpy import dot, log, zeros, pi
from numpy.linalg import inv
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.regression.linear_model import yule_walker, GLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams,
_ma_transparams, _ma_invtransparams,
unintegrate, unintegrate_levels)
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tools.numdiff import approx_hess_cs, approx_fprime_cs
from statsmodels.tsa.base.datetools import _index_date
from statsmodels.tsa.kalmanf import KalmanFilter
_armax_notes = """
Notes
-----
If exogenous variables are given, then the model that is fit is
.. math::
\\phi(L)(y_t - X_t\\beta) = \\theta(L)\epsilon_t
where :math:`\\phi` and :math:`\\theta` are polynomials in the lag
operator, :math:`L`. This is the regression model with ARMA errors,
or ARMAX model. This specification is used, whether or not the model
is fit using conditional sum of square or maximum-likelihood, using
the `method` argument in
:meth:`statsmodels.tsa.arima_model.%(Model)s.fit`. Therefore, for
now, `css` and `mle` refer to estimation methods only. This may
change for the case of the `css` model in future versions.
"""
_arma_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_arma_model = "Autoregressive Moving Average ARMA(p,q) Model"
_arima_model = "Autoregressive Integrated Moving Average ARIMA(p,d,q) Model"
_arima_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_predict_notes = """
Notes
-----
Use the results predict method instead.
"""
_results_notes = """
Notes
-----
It is recommended to use dates with the time-series models, as the
below will probably make clear. However, if ARIMA is used without
dates and/or `start` and `end` are given as indices, then these
indices are in terms of the *original*, undifferenced series. Ie.,
given some undifferenced observations::
1970Q1, 1
1970Q2, 1.5
1970Q3, 1.25
1970Q4, 2.25
1971Q1, 1.2
1971Q2, 4.1
1970Q1 is observation 0 in the original series. However, if we fit an
ARIMA(p,1,q) model then we lose this first observation through
differencing. Therefore, the first observation we can forecast (if
using exact MLE) is index 1. In the differenced series this is index
0, but we refer to it as 1 from the original series.
"""
_predict = """
%(Model)s model in-sample and out-of-sample prediction
Parameters
----------
%(params)s
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction.
exog : array-like, optional
If the model is an ARMAX and out-of-sample forecasting is
requested, exog must be given. Note that you'll need to pass
`k_ar` additional lags for any exogenous variables. E.g., if you
fit an ARMAX(2, q) model and want to predict 5 steps, you need 7
observations to do this.
dynamic : bool, optional
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
%(extra_params)s
Returns
-------
%(returns)s
%(extra_section)s
"""
_predict_returns = """predict : array
The predicted values.
"""
_arma_predict = _predict % {"Model" : "ARMA",
"params" : """
params : array-like
The fitted parameters of the model.""",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _predict_notes}
_arma_results_predict = _predict % {"Model" : "ARMA", "params" : "",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_predict = _predict % {"Model" : "ARIMA",
"params" : """params : array-like
The fitted parameters of the model.""",
"extra_params" : """typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""", "returns" : _predict_returns,
"extra_section" : _predict_notes}
_arima_results_predict = _predict % {"Model" : "ARIMA",
"params" : "",
"extra_params" :
"""typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_plot_predict_example = """ Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import pandas as pd
>>>
>>> dta = sm.datasets.sunspots.load_pandas().data[['SUNACTIVITY']]
>>> dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')
>>> res = sm.tsa.ARMA(dta, (3, 0)).fit()
>>> fig, ax = plt.subplots()
>>> ax = dta.ix['1950':].plot(ax=ax)
>>> fig = res.plot_predict('1990', '2012', dynamic=True, ax=ax,
... plot_insample=False)
>>> plt.show()
.. plot:: plots/arma_predict_plot.py
"""
_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' + _results_notes)
}
_arima_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' +
'\n'.join(_results_notes.split('\n')[:3]) +
("""
This is hard-coded to only allow plotting of the forecasts in levels.
""") +
'\n'.join(_results_notes.split('\n')[3:]))
}
def cumsum_n(x, n):
if n:
n -= 1
x = np.cumsum(x)
return cumsum_n(x, n)
else:
return x
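# (added note, not in the original source) `cumsum_n` just applies np.cumsum
# `n` times, e.g. cumsum_n(np.array([1., 1., 1.]), 2) -> array([1., 3., 6.]).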
def _check_arima_start(start, k_ar, k_diff, method, dynamic):
if start < 0:
raise ValueError("The start index %d of the original series "
"has been differenced away" % start)
elif (dynamic or 'mle' not in method) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _get_predict_out_of_sample(endog, p, q, k_trend, k_exog, start, errors,
trendparam, exparams, arparams, maparams, steps,
method, exog=None):
"""
Returns endog, resid, mu of appropriate length for out of sample
prediction.
"""
if q:
resid = np.zeros(q)
if start and 'mle' in method or (start == p and not start == 0):
resid[:q] = errors[start-q:start]
elif start:
resid[:q] = errors[start-q-p:start-p]
else:
resid[:q] = errors[-q:]
else:
resid = None
y = endog
if k_trend == 1:
# use expectation not constant
if k_exog > 0:
#TODO: technically should only hold for MLE not
# conditional model. See #274.
# ensure 2-d for conformability
if np.ndim(exog) == 1 and k_exog == 1:
# have a 1d series of observations -> 2d
exog = exog[:, None]
elif np.ndim(exog) == 1:
# should have a 1d row of exog -> 2d
if len(exog) != k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
X = lagmat(np.dot(exog, exparams), p, original='in', trim='both')
mu = trendparam * (1 - arparams.sum())
# arparams were reversed in unpack for ease later
mu = mu + (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = trendparam * (1 - arparams.sum())
mu = np.array([mu]*steps)
elif k_exog > 0:
X = np.dot(exog, exparams)
#NOTE: you shouldn't have to give in-sample exog!
X = lagmat(X, p, original='in', trim='both')
mu = (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = np.zeros(steps)
endog = np.zeros(p + steps - 1)
if p and start:
endog[:p] = y[start-p:start]
elif p:
endog[:p] = y[-p:]
return endog, resid, mu
def _arma_predict_out_of_sample(params, steps, errors, p, q, k_trend, k_exog,
endog, exog=None, start=0, method='mle'):
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q), k_trend,
k_exog, reverse=True)
endog, resid, mu = _get_predict_out_of_sample(endog, p, q, k_trend, k_exog,
start, errors, trendparam,
exparams, arparams,
maparams, steps, method,
exog)
forecast = np.zeros(steps)
if steps == 1:
if q:
return mu[0] + np.dot(arparams, endog[:p]) + np.dot(maparams,
resid[:q])
else:
return mu[0] + np.dot(arparams, endog[:p])
if q:
i = 0 # if q == 1
else:
i = -1
for i in range(min(q, steps - 1)):
fcast = (mu[i] + np.dot(arparams, endog[i:i + p]) +
np.dot(maparams[:q - i], resid[i:i + q]))
forecast[i] = fcast
endog[i+p] = fcast
for i in range(i + 1, steps - 1):
fcast = mu[i] + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i+p] = fcast
#need to do one more without updating endog
forecast[steps - 1] = mu[steps - 1] + np.dot(arparams, endog[steps - 1:])
return forecast
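# (added commentary, not in the original source) The loops above implement the
# standard ARMA forecast recursion: for horizon h,
#     y_hat[T+h] = mu + sum_i ar[i] * y_or_forecast[T+h-i]
#                     + sum_j ma[j] * resid[T+h-j]
# where the MA terms only contribute while lagged in-sample residuals are
# available (the first q steps); beyond that, future errors are set to their
# expectation of zero and the recursion is purely autoregressive plus mu.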
def _arma_predict_in_sample(start, end, endog, resid, k_ar, method):
"""
Pre- and in-sample fitting for ARMA.
"""
if 'mle' in method:
fittedvalues = endog - resid # get them all then trim
else:
fittedvalues = endog[k_ar:] - resid
fv_start = start
if 'mle' not in method:
fv_start -= k_ar # start is in terms of endog index
fv_end = min(len(fittedvalues), end + 1)
return fittedvalues[fv_start:fv_end]
def _validate(start, k_ar, k_diff, dates, method):
if isinstance(start, (string_types, datetime)):
start = _index_date(start, dates)
start -= k_diff
if 'mle' not in method and start < k_ar - k_diff:
raise ValueError("Start must be >= k_ar for conditional "
"MLE or dynamic forecast. Got %s" % start)
return start
def _unpack_params(params, order, k_trend, k_exog, reverse=False):
p, q = order
k = k_trend + k_exog
maparams = params[k+p:]
arparams = params[k:k+p]
trend = params[:k_trend]
exparams = params[k_trend:k]
if reverse:
return trend, exparams, arparams[::-1], maparams[::-1]
return trend, exparams, arparams, maparams
def _unpack_order(order):
k_ar, k_ma, k = order
k_lags = max(k_ar, k_ma+1)
return k_ar, k_ma, order, k_lags
def _make_arma_names(data, k_trend, order, exog_names):
k_ar, k_ma = order
exog_names = exog_names or []
ar_lag_names = util.make_lag_names([data.ynames], k_ar, 0)
ar_lag_names = [''.join(('ar.', i)) for i in ar_lag_names]
ma_lag_names = util.make_lag_names([data.ynames], k_ma, 0)
ma_lag_names = [''.join(('ma.', i)) for i in ma_lag_names]
trend_name = util.make_lag_names('', 0, k_trend)
# ensure exog_names stays unchanged when the `fit` method
# is called multiple times.
if exog_names[-k_ma:] == ma_lag_names and \
exog_names[-(k_ar+k_ma):-k_ma] == ar_lag_names and \
(not exog_names or not trend_name or trend_name[0] == exog_names[0]):
return exog_names
exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names
return exog_names
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == 'c': # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == 'c': # constant plus exogenous
exog = add_trend(exog, trend='c', prepend=True)
elif exog is not None and trend == 'nc':
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == 'nc':
k_trend = 0
return k_trend, exog
def _check_estimable(nobs, n_params):
if nobs <= n_params:
raise ValueError("Insufficient degrees of freedom to estimate")
class ARMA(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : _arma_model,
"params" : _arma_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARMA"}}
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)
exog = self.data.exog # get it after it's gone through processing
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
if exog is not None:
if exog.ndim == 1:
exog = exog[:, None]
k_exog = exog.shape[1] # number of exog. variables excl. const
else:
k_exog = 0
self.k_exog = k_exog
def _fit_start_params_hr(self, order):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the laglength selected according
to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
OLS using these residuals for a first approximation. Uses a separate
OLS regression to find the coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
"""
p, q, k = order
start_params = zeros((p+q+k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
# make sure we don't run into small data problems in AR fit
nobs = len(endog)
maxlag = int(round(12*(nobs/100.)**(1/4.)))
if maxlag >= nobs:
maxlag = nobs - 1
armod = AR(endog).fit(ic='bic', trend='nc', maxlag=maxlag)
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
# it's possible in small samples that optimal lag-order
# doesn't leave enough obs. No consistent way to fix.
if p_tmp + q >= len(endog):
raise ValueError("Proper starting parameters cannot"
" be found for this order with this "
"number of observations. Use the "
"start_params argument.")
resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
trim='both'),
arcoefs_tmp)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, 'both')[endog_start:]
lag_resid = lagmat(resid, q, 'both')[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
start_params[k:k+p+q] = coefs
else:
start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
if q == 0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k:k+p] = arcoefs
# check AR coefficients
if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k:k + p]]
)) < 1):
raise ValueError("The computed initial AR coefficients are not "
"stationary\nYou should induce stationarity, "
"choose a different model order, or you can\n"
"pass your own start_params.")
# check MA coefficients
elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p:]]
)) < 1):
raise ValueError("The computed initial MA coefficients are not "
"invertible\nYou should induce invertibility, "
"choose a different model order, or you can\n"
"pass your own start_params.")
return start_params
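    # (added commentary, not part of the original statsmodels source) In short,
    # the Hannan-Rissanen recipe implemented above is:
    #   1. regress out any exogenous part by OLS/GLS;
    #   2. fit a long autoregression (lag length picked by BIC) and keep its
    #      residuals as a proxy for the unobserved innovations;
    #   3. regress the series on p of its own lags and q lags of those
    #      residuals; the fitted coefficients become the ARMA starting values.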
def _fit_start_params(self, order, method):
if method != 'css-mle': # use Hannan-Rissanen to get start params
start_params = self._fit_start_params_hr(order)
else: # use CSS to get start params
func = lambda params: -self.loglike_css(params)
#start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
start_params = self._fit_start_params_hr(order)
if self.transparams:
start_params = self._invtransparams(start_params)
bounds = [(None,)*2]*sum(order)
mlefit = optimize.fmin_l_bfgs_b(func, start_params,
approx_grad=True, m=12,
pgtol=1e-7, factr=1e3,
bounds=bounds, iprint=-1)
start_params = self._transparams(mlefit[0])
return start_params
def score(self, params):
"""
Compute the score function at params.
Notes
-----
This is a numerical approximation.
"""
return approx_fprime_cs(params, self.loglike, args=(False,))
def hessian(self, params):
"""
Compute the Hessian at params,
Notes
-----
This is a numerical approximation.
"""
return approx_hess_cs(params, self.loglike, args=(False,))
def _transparams(self, params):
"""
        Transforms params to induce stationarity/invertibility.
Reference
---------
Jones(1980)
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = np.zeros_like(params)
# just copy exogenous parameters
if k != 0:
newparams[:k] = params[:k]
# AR Coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())
# MA Coeffs
if k_ma != 0:
newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = start_params.copy()
arcoefs = newparams[k:k+k_ar]
macoefs = newparams[k+k_ar:]
# AR coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)
# MA coeffs
if k_ma != 0:
newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)
return newparams
def _get_predict_start(self, start, dynamic):
# do some defaults
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
k_diff = getattr(self, 'k_diff', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
self._set_predict_start_date(start) # else it's done in super
elif isinstance(start, int):
start = super(ARMA, self)._get_predict_start(start)
else: # should be on a date
#elif 'mle' not in method or dynamic: # should be on a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARMA, self)._get_predict_start(start)
_check_arima_start(start, k_ar, k_diff, method, dynamic)
return start
def _get_predict_end(self, end, dynamic=False):
# pass through so predict works for ARIMA and ARMA
return super(ARMA, self)._get_predict_end(end)
def geterrors(self, params):
"""
Get the errors of the ARMA process.
Parameters
----------
params : array-like
The fitted ARMA parameters
order : array-like
3 item iterable, with the number of AR, MA, and exogenous
parameters, including the trend
"""
#start = self._get_predict_start(start) # will be an index of a date
#end, out_of_sample = self._get_predict_end(end)
params = np.asarray(params)
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
method = getattr(self, 'method', 'mle')
if 'mle' in method: # use KalmanFilter to get errors
(y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,
T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params,
self)
errors = KalmanFilter.geterrors(y, k, k_ar, k_ma, k_lags, nobs,
Z_mat, m, R_mat, T_mat,
paramsdtype)
if isinstance(errors, tuple):
errors = errors[0] # non-cython version returns a tuple
else: # use scipy.signal.lfilter
y = self.endog.copy()
k = self.k_exog + self.k_trend
if k > 0:
y -= dot(self.exog, params[:k])
k_ar = self.k_ar
k_ma = self.k_ma
(trendparams, exparams,
arparams, maparams) = _unpack_params(params, (k_ar, k_ma),
self.k_trend, self.k_exog,
reverse=False)
b, a = np.r_[1, -arparams], np.r_[1, maparams]
zi = zeros((max(k_ar, k_ma)))
for i in range(k_ar):
zi[i] = sum(-b[:i+1][::-1]*y[:i+1])
e = lfilter(b, a, y, zi=zi)
errors = e[0][k_ar:]
return errors.squeeze()
def predict(self, params, start=None, end=None, exog=None, dynamic=False):
method = getattr(self, 'method', 'mle') # don't assume fit
#params = np.asarray(params)
# will return an index of a date
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end, dynamic)
if out_of_sample and (exog is None and self.k_exog > 0):
raise ValueError("You must provide exog for ARMAX")
endog = self.endog
resid = self.geterrors(params)
k_ar = self.k_ar
if exog is not None:
# Note: we ignore currently the index of exog if it is available
exog = np.asarray(exog)
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
if out_of_sample != 0 and self.k_exog > 0:
# we need the last k_ar exog for the lag-polynomial
if self.k_exog > 0 and k_ar > 0 and not dynamic:
# need the last k_ar exog for the lag-polynomial
exog = np.vstack((self.exog[-k_ar:, self.k_trend:], exog))
if dynamic:
if self.k_exog > 0:
# need the last k_ar exog for the lag-polynomial
exog = np.vstack((self.exog[start - k_ar:, self.k_trend:], exog))
#TODO: now that predict does dynamic in-sample it should
# also return error estimates and confidence intervals
# but how? len(endog) is not tot_obs
out_of_sample += end - start + 1
return _arma_predict_out_of_sample(params, out_of_sample, resid,
k_ar, self.k_ma, self.k_trend,
self.k_exog, endog, exog,
start, method)
predictedvalues = _arma_predict_in_sample(start, end, endog, resid,
k_ar, method)
if out_of_sample:
forecastvalues = _arma_predict_out_of_sample(params, out_of_sample,
resid, k_ar,
self.k_ma,
self.k_trend,
self.k_exog, endog,
exog, method=method)
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
predict.__doc__ = _arma_predict
def loglike(self, params, set_sigma2=True):
"""
Compute the log-likelihood for ARMA(p,q) model
Notes
-----
Likelihood used depends on the method set in fit
"""
method = self.method
if method in ['mle', 'css-mle']:
return self.loglike_kalman(params, set_sigma2)
elif method == 'css':
return self.loglike_css(params, set_sigma2)
else:
raise ValueError("Method %s not understood" % method)
def loglike_kalman(self, params, set_sigma2=True):
"""
Compute exact loglikelihood for ARMA(p,q) model by the Kalman Filter.
"""
return KalmanFilter.loglike(params, self, set_sigma2)
def loglike_css(self, params, set_sigma2=True):
"""
Conditional Sum of Squares likelihood function.
"""
k_ar = self.k_ar
k_ma = self.k_ma
k = self.k_exog + self.k_trend
y = self.endog.copy().astype(params.dtype)
nobs = self.nobs
# how to handle if empty?
if self.transparams:
newparams = self._transparams(params)
else:
newparams = params
if k > 0:
y -= dot(self.exog, newparams[:k])
# the order of p determines how many zeros errors to set for lfilter
b, a = np.r_[1, -newparams[k:k + k_ar]], np.r_[1, newparams[k + k_ar:]]
zi = np.zeros((max(k_ar, k_ma)), dtype=params.dtype)
for i in range(k_ar):
zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])
errors = lfilter(b, a, y, zi=zi)[0][k_ar:]
ssr = np.dot(errors, errors)
sigma2 = ssr/nobs
if set_sigma2:
self.sigma2 = sigma2
llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)
return llf
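    # (added commentary, not in the original source) The value returned above
    # is the Gaussian conditional-sum-of-squares log-likelihood
    #     llf = -T/2 * (log(2*pi) + log(sigma2)) - SSR / (2 * sigma2)
    # with sigma2 = SSR / T, T = self.nobs, and SSR the sum of squared
    # one-step prediction errors produced by the lfilter recursion.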
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
statsmodels.tsa.arima_model.ARMAResults class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARMAResults : results class returned by fit
Notes
------
If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
k_ar = self.k_ar
k_ma = self.k_ma
# enforce invertibility
self.transparams = transparams
endog, exog = self.endog, self.exog
k_exog = self.k_exog
self.nobs = len(endog) # this is overwritten if method is 'css'
# (re)set trend and handle exogenous variables
# always pass original exog
k_trend, exog = _make_arma_exog(endog, self.exog, trend)
# Check has something to estimate
if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
raise ValueError("Estimation requires the inclusion of least one "
"AR term, MA term, a constant or an exogenous "
"variable.")
# check again now that we know the trend
_check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
self.k_trend = k_trend
self.exog = exog # overwrites original exog from __init__
# (re)set names for this model
self.exog_names = _make_arma_names(self.data, k_trend, (k_ar, k_ma),
self.exog_names)
k = k_trend + k_exog
# choose objective function
if k_ma == 0 and k_ar == 0:
method = "css" # Always CSS when no AR or MA terms
self.method = method = method.lower()
# adjust nobs for css
if method == 'css':
self.nobs = len(self.endog) - k_ar
if start_params is not None:
start_params = np.asarray(start_params)
else: # estimate starting parameters
start_params = self._fit_start_params((k_ar, k_ma, k), method)
if transparams: # transform initial parameters to ensure invertibility
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(ARMA, self).fit(start_params, method=solver,
maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if transparams: # transform parameters back
params = self._transparams(params)
self.transparams = False # so methods don't expect transf.
normalized_cov_params = None # TODO: fix this
armafit = ARMAResults(self, params, normalized_cov_params)
armafit.mle_retvals = mlefit.mle_retvals
armafit.mle_settings = mlefit.mle_settings
return ARMAResultsWrapper(armafit)
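    # (added usage sketch, not part of the original module; the data below is
    # synthetic and purely illustrative)
    #
    #     import numpy as np
    #     from statsmodels.tsa.arima_model import ARMA
    #     np.random.seed(12345)
    #     y = np.random.randn(250)
    #     res = ARMA(y, order=(1, 1)).fit(disp=0)
    #     print(res.params)
    #     print(res.summary())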
#NOTE: the length of endog changes when we give a difference to fit
#so model methods are not the same on unfit models as fit ones
#starting to think that order of model should be put in instantiation...
class ARIMA(ARMA):
__doc__ = tsbase._tsa_doc % {"model" : _arima_model,
"params" : _arima_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARIMA"}}
def __new__(cls, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d == 0: # then we just use an ARMA model
return ARMA(endog, (p, q), exog, dates, freq, missing)
else:
mod = super(ARIMA, cls).__new__(cls)
mod.__init__(endog, order, exog, dates, freq, missing)
return mod
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d > 2:
#NOTE: to make more general, need to address the d == 2 stuff
# in the predict method
raise ValueError("d > 2 is not supported")
super(ARIMA, self).__init__(endog, (p, q), exog, dates, freq, missing)
self.k_diff = d
self._first_unintegrate = unintegrate_levels(self.endog[:d], d)
self.endog = np.diff(self.endog, n=d)
#NOTE: will check in ARMA but check again since differenced now
_check_estimable(len(self.endog), p+q)
if exog is not None:
self.exog = self.exog[d:]
if d == 1:
self.data.ynames = 'D.' + self.endog_names
else:
self.data.ynames = 'D{0:d}.'.format(d) + self.endog_names
# what about exog, should we difference it automatically before
# super call?
def _get_predict_start(self, start, dynamic):
"""
"""
#TODO: remove all these getattr and move order specification to
# class constructor
k_diff = getattr(self, 'k_diff', 0)
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
elif isinstance(start, int):
start -= k_diff
try: # catch when given an integer outside of dates index
start = super(ARIMA, self)._get_predict_start(start,
dynamic)
except IndexError:
raise ValueError("start must be in series. "
"got %d" % (start + k_diff))
else: # received a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARIMA, self)._get_predict_start(start, dynamic)
# reset date for k_diff adjustment
self._set_predict_start_date(start + k_diff)
return start
def _get_predict_end(self, end, dynamic=False):
"""
Returns last index to be forecast of the differenced array.
Handling of inclusiveness should be done in the predict function.
"""
end, out_of_sample = super(ARIMA, self)._get_predict_end(end, dynamic)
if 'mle' not in self.method and not dynamic:
end -= self.k_ar
return end - self.k_diff, out_of_sample
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARIMA(p,d,q) model by exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
            Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -
(conjugate gradient), 'ncg' (non-conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
`statsmodels.tsa.arima.ARIMAResults` class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARIMAResults : results class returned by fit
Notes
------
If fit by 'mle', it is assumed for the Kalman Filter that the initial
        unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
mlefit = super(ARIMA, self).fit(start_params, trend,
method, transparams, solver,
maxiter, full_output, disp,
callback, **kwargs)
normalized_cov_params = None # TODO: fix this?
arima_fit = ARIMAResults(self, mlefit._results.params,
normalized_cov_params)
arima_fit.k_diff = self.k_diff
arima_fit.mle_retvals = mlefit.mle_retvals
arima_fit.mle_settings = mlefit.mle_settings
return ARIMAResultsWrapper(arima_fit)
def predict(self, params, start=None, end=None, exog=None, typ='linear',
dynamic=False):
# go ahead and convert to an index for easier checking
if isinstance(start, (string_types, datetime)):
start = _index_date(start, self.data.dates)
if typ == 'linear':
if not dynamic or (start != self.k_ar + self.k_diff and
start is not None):
return super(ARIMA, self).predict(params, start, end, exog,
dynamic)
else:
# need to assume pre-sample residuals are zero
# do this by a hack
q = self.k_ma
self.k_ma = 0
predictedvalues = super(ARIMA, self).predict(params, start,
end, exog,
dynamic)
self.k_ma = q
return predictedvalues
elif typ == 'levels':
endog = self.data.endog
if not dynamic:
predict = super(ARIMA, self).predict(params, start, end, exog,
dynamic)
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end)
d = self.k_diff
if 'mle' in self.method:
start += d - 1 # for case where d == 2
end += d - 1
# add each predicted diff to lagged endog
if out_of_sample:
fv = predict[:-out_of_sample] + endog[start:end+1]
if d == 2: #TODO: make a general solution to this
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[start:end + 1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
k_ar = self.k_ar
if out_of_sample:
fv = (predict[:-out_of_sample] +
endog[max(start, self.k_ar-1):end+k_ar+1])
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[max(start, k_ar):end+k_ar+1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
#IFF we need to use pre-sample values assume pre-sample
# residuals are zero, do this by a hack
if start == self.k_ar + self.k_diff or start is None:
# do the first k_diff+1 separately
p = self.k_ar
q = self.k_ma
k_exog = self.k_exog
k_trend = self.k_trend
k_diff = self.k_diff
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q),
k_trend,
k_exog,
reverse=True)
# this is the hack
self.k_ma = 0
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
if not start:
start = self._get_predict_start(start, dynamic)
start += k_diff
self.k_ma = q
return endog[start-1] + np.cumsum(predict)
else:
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
return endog[start-1] + np.cumsum(predict)
return fv
else: # pragma : no cover
raise ValueError("typ %s not understood" % typ)
predict.__doc__ = _arima_predict
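# Illustrative sketch (not part of the original module): how the ARIMA class
# above is typically used end-to-end. The simulated random-walk data, the
# helper name and the start/steps values are assumptions made purely for
# illustration; this is a hedged usage sketch, not library documentation.
def _example_arima_usage():
    """Minimal sketch of fitting ARIMA(1, 1, 1) and predicting in levels."""
    import numpy as np
    np.random.seed(0)
    # an integrated series (random walk with drift), so d=1 is a sensible choice
    y = np.cumsum(0.5 + np.random.randn(200))
    results = ARIMA(y, order=(1, 1, 1)).fit(disp=0)
    # typ='levels' undoes the differencing so predictions are in the original scale
    in_sample = results.predict(start=2, end=len(y) - 1, typ='levels')
    forecast, stderr, conf_int = results.forecast(steps=5)
    return in_sample, forecast, conf_int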
class ARMAResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an ARMA model.
Parameters
----------
model : ARMA instance
The fitted model instance
params : array
Fitted parameters
normalized_cov_params : array, optional
The normalized variance covariance matrix
scale : float, optional
Optional argument to scale the variance covariance matrix.
Returns
--------
**Attributes**
aic : float
Akaike Information Criterion
:math:`-2*llf+2* df_model`
        where `df_model` includes all AR parameters, MA parameters, constant
        terms, and the variance.
arparams : array
The parameters associated with the AR coefficients in the model.
arroots : array
The roots of the AR coefficients are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
bic : float
Bayes Information Criterion
-2*llf + log(nobs)*df_model
Where if the model is fit using conditional sum of squares, the
number of observations `nobs` does not include the `p` pre-sample
observations.
bse : array
The standard errors of the parameters. These are computed using the
numerical Hessian.
df_model : array
The model degrees of freedom = `k_exog` + `k_trend` + `k_ar` + `k_ma`
df_resid : array
The residual degrees of freedom = `nobs` - `df_model`
fittedvalues : array
The predicted values of the model.
hqic : float
Hannan-Quinn Information Criterion
-2*llf + 2*(`df_model`)*log(log(nobs))
Like `bic` if the model is fit using conditional sum of squares then
the `k_ar` pre-sample observations are not counted in `nobs`.
k_ar : int
The number of AR coefficients in the model.
k_exog : int
The number of exogenous variables included in the model. Does not
include the constant.
k_ma : int
The number of MA coefficients.
k_trend : int
This is 0 for no constant or 1 if a constant is included.
llf : float
The value of the log-likelihood function evaluated at `params`.
maparams : array
The value of the moving average coefficients.
maroots : array
The roots of the MA coefficients are the solution to
(1 + maparams[0]*z + maparams[1]*z**2 + ... + maparams[q-1]*z**q) = 0
        Stability requires that the roots in modulus lie outside the unit
circle.
model : ARMA instance
A reference to the model that was fit.
nobs : float
The number of observations used to fit the model. If the model is fit
using exact maximum likelihood this is equal to the total number of
observations, `n_totobs`. If the model is fit using conditional
maximum likelihood this is equal to `n_totobs` - `k_ar`.
n_totobs : float
The total number of observations for `endog`. This includes all
observations, even pre-sample values if the model is fit using `css`.
params : array
The parameters of the model. The order of variables is the trend
        coefficients and the `k_exog` exogenous coefficients, then the
`k_ar` AR coefficients, and finally the `k_ma` MA coefficients.
pvalues : array
The p-values associated with the t-values of the coefficients. Note
that the coefficients are assumed to have a Student's T distribution.
resid : array
The model residuals. If the model is fit using 'mle' then the
residuals are created via the Kalman Filter. If the model is fit
using 'css' then the residuals are obtained via `scipy.signal.lfilter`
adjusted such that the first `k_ma` residuals are zero. These zero
residuals are not returned.
scale : float
This is currently set to 1.0 and not used by the model or its results.
sigma2 : float
The variance of the residuals. If the model is fit by 'css',
sigma2 = ssr/nobs, where ssr is the sum of squared residuals. If
the model is fit by 'mle', then sigma2 = 1/nobs * sum(v**2 / F)
where v is the one-step forecast error and F is the forecast error
variance. See `nobs` for the difference in definitions depending on the
fit.
"""
_cache = {}
#TODO: use this for docstring when we fix nobs issue
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARMAResults, self).__init__(model, params, normalized_cov_params,
scale)
self.sigma2 = model.sigma2
nobs = model.nobs
self.nobs = nobs
k_exog = model.k_exog
self.k_exog = k_exog
k_trend = model.k_trend
self.k_trend = k_trend
k_ar = model.k_ar
self.k_ar = k_ar
self.n_totobs = len(model.endog)
k_ma = model.k_ma
self.k_ma = k_ma
df_model = k_exog + k_trend + k_ar + k_ma
self._ic_df_model = df_model + 1
self.df_model = df_model
self.df_resid = self.nobs - df_model
self._cache = resettable_cache()
@cache_readonly
def arroots(self):
return np.roots(np.r_[1, -self.arparams])**-1
@cache_readonly
def maroots(self):
return np.roots(np.r_[1, self.maparams])**-1
@cache_readonly
def arfreq(self):
r"""
Returns the frequency of the AR roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def mafreq(self):
r"""
Returns the frequency of the MA roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def arparams(self):
k = self.k_exog + self.k_trend
return self.params[k:k+self.k_ar]
@cache_readonly
def maparams(self):
k = self.k_exog + self.k_trend
k_ar = self.k_ar
return self.params[k+k_ar:]
@cache_readonly
def llf(self):
return self.model.loglike(self.params)
@cache_readonly
def bse(self):
params = self.params
hess = self.model.hessian(params)
if len(params) == 1: # can't take an inverse, ensure 1d
return np.sqrt(-1./hess[0])
return np.sqrt(np.diag(-inv(hess)))
def cov_params(self): # add scale argument?
params = self.params
hess = self.model.hessian(params)
return -inv(hess)
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * self._ic_df_model
@cache_readonly
def bic(self):
nobs = self.nobs
return -2 * self.llf + np.log(nobs) * self._ic_df_model
@cache_readonly
def hqic(self):
nobs = self.nobs
return -2 * self.llf + 2 * np.log(np.log(nobs)) * self._ic_df_model
@cache_readonly
def fittedvalues(self):
model = self.model
endog = model.endog.copy()
k_ar = self.k_ar
exog = model.exog # this is a copy
if exog is not None:
if model.method == "css" and k_ar > 0:
exog = exog[k_ar:]
if model.method == "css" and k_ar > 0:
endog = endog[k_ar:]
fv = endog - self.resid
# add deterministic part back in
#k = self.k_exog + self.k_trend
#TODO: this needs to be commented out for MLE with constant
#if k != 0:
# fv += dot(exog, self.params[:k])
return fv
@cache_readonly
def resid(self):
return self.model.geterrors(self.params)
@cache_readonly
def pvalues(self):
#TODO: same for conditional and unconditional?
df_resid = self.df_resid
return t.sf(np.abs(self.tvalues), df_resid) * 2
def predict(self, start=None, end=None, exog=None, dynamic=False):
return self.model.predict(self.params, start, end, exog, dynamic)
predict.__doc__ = _arma_results_predict
def _forecast_error(self, steps):
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))
return fcasterr
def _forecast_conf_int(self, forecast, fcasterr, alpha):
const = norm.ppf(1 - alpha / 2.)
conf_int = np.c_[forecast - const * fcasterr,
forecast + const * fcasterr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
"""
if exog is not None:
#TODO: make a convenience function for this. we're using the
# pattern elsewhere in the codebase
exog = np.asarray(exog)
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
elif exog.ndim == 1:
if len(exog) != self.k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
if self.k_ar > 0:
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast = _arma_predict_out_of_sample(self.params,
steps, self.resid, self.k_ar,
self.k_ma, self.k_trend,
self.k_exog, self.model.endog,
exog, method=self.model.method)
# compute the standard errors
fcasterr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcasterr, alpha)
return forecast, fcasterr, conf_int
def summary(self, alpha=.05):
"""Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
model = self.model
title = model.__class__.__name__ + ' Model Results'
method = model.method
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]
else:
            # keep the list form so the sample[0]/sample[1] lookups below work
            sample = [str(start), ' - ' + str(len(self.data.orig_endog))]
k_ar, k_ma = self.k_ar, self.k_ma
if not k_diff:
order = str((k_ar, k_ma))
else:
order = str((k_ar, k_diff, k_ma))
top_left = [('Dep. Variable:', None),
('Model:', [model.__class__.__name__ + order]),
('Method:', [method]),
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [str(len(self.model.endog))]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('S.D. of innovations', ["%#5.3f" % self.sigma2**.5]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
# Make the roots table
from statsmodels.iolib.table import SimpleTable
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0,0 model
stubs = []
if len(stubs): # not 0, 0
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable(data,
headers=[' Real',
' Imaginary',
' Modulus',
' Frequency'],
title="Roots",
stubs=stubs,
data_fmts=["%17.4f", "%+17.4fj",
"%17.4f", "%17.4f"])
smry.tables.append(roots_table)
return smry
def summary2(self, title=None, alpha=.05, float_format="%.4f"):
"""Experimental summary function for ARIMA Results
Parameters
-----------
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
        float_format : string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from pandas import DataFrame
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in self.model.method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [dates[-1].strftime('%m-%d-%Y')]
else:
            # keep the list form so the sample[0]/sample[-1] lookups below work
            sample = [str(start), ' - ' + str(len(self.data.orig_endog))]
k_ar, k_ma = self.k_ar, self.k_ma
# Roots table
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0, 0 order
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
data = DataFrame(data)
data.columns = ['Real', 'Imaginary', 'Modulus', 'Frequency']
data.index = stubs
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
# Model info
model_info = summary2.summary_model(self)
model_info['Method:'] = self.model.method
model_info['Sample:'] = sample[0]
model_info[' '] = sample[-1]
model_info['S.D. of innovations:'] = "%#5.3f" % self.sigma2**.5
model_info['HQIC:'] = "%#5.3f" % self.hqic
model_info['No. Observations:'] = str(len(self.model.endog))
# Parameters
params = summary2.summary_params(self)
smry.add_dict(model_info)
smry.add_df(params, float_format=float_format)
if len(stubs):
smry.add_df(data, float_format="%17.4f")
smry.add_title(results=self, title=title)
return smry
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=False)
end, out_of_sample = self.model._get_predict_end(end, dynamic=False)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
from pandas import TimeSeries
forecast = TimeSeries(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
ax.plot(x[:end + 1 - start], self.model.endog[start:end+1],
label=self.model.endog_names)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _plot_predict
class ARMAResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)
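# Illustrative sketch (not part of the original module): once an ARMA model has
# been fit, the ARMAResults class above exposes roots, information criteria and
# forecast machinery directly. The simulated data, chosen orders and the helper
# name are assumptions for illustration only.
def _example_armaresults_inspection():
    """Hedged sketch of inspecting ARMAResults after fitting an ARMA(2, 1)."""
    import numpy as np
    from statsmodels.tsa.arima_process import arma_generate_sample
    np.random.seed(123)
    y = arma_generate_sample([1., -.6, .2], [1., .4], nsample=500)
    res = ARMA(y, order=(2, 1)).fit(trend='nc', disp=0)
    info = {'aic': res.aic, 'bic': res.bic, 'hqic': res.hqic,
            'ar_roots': res.arroots, 'ma_roots': res.maroots}
    # out-of-sample forecast with 95% confidence intervals
    fcast, stderr, conf_int = res.forecast(steps=10, alpha=0.05)
    return info, fcast, conf_int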
class ARIMAResults(ARMAResults):
def predict(self, start=None, end=None, exog=None, typ='linear',
dynamic=False):
return self.model.predict(self.params, start, end, exog, typ, dynamic)
predict.__doc__ = _arima_results_predict
def _forecast_error(self, steps):
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcerr = np.sqrt(np.cumsum(cumsum_n(ma_rep, self.k_diff)**2)*sigma2)
return fcerr
def _forecast_conf_int(self, forecast, fcerr, alpha):
const = norm.ppf(1 - alpha/2.)
conf_int = np.c_[forecast - const*fcerr, forecast + const*fcerr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARIMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
Notes
-----
Prediction is done in the levels of the original endogenous variable.
        If you would like predictions of the differenced series, use `predict`.
"""
if exog is not None:
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
if self.k_ar > 0:
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast = _arma_predict_out_of_sample(self.params, steps, self.resid,
self.k_ar, self.k_ma,
self.k_trend, self.k_exog,
self.model.endog,
exog, method=self.model.method)
d = self.k_diff
endog = self.model.data.endog[-d:]
forecast = unintegrate(forecast, unintegrate_levels(endog, d))[d:]
# get forecast errors
fcerr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcerr, alpha)
return forecast, fcerr, conf_int
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, 'levels', dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=dynamic)
end, out_of_sample = self.model._get_predict_end(end, dynamic=dynamic)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
from pandas import TimeSeries
forecast = TimeSeries(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
import re
k_diff = self.k_diff
label = re.sub("D\d*\.", "", self.model.endog_names)
levels = unintegrate(self.model.endog,
self.model._first_unintegrate)
ax.plot(x[:end + 1 - start],
levels[start + k_diff:end + k_diff + 1], label=label)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _arima_plot_predict
class ARIMAResultsWrapper(ARMAResultsWrapper):
pass
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults)
if __name__ == "__main__":
import statsmodels.api as sm
# simulate arma process
from statsmodels.tsa.arima_process import arma_generate_sample
y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
arma = ARMA(y)
res = arma.fit(trend='nc', order=(1, 1))
np.random.seed(12345)
y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
nsample=1000)
arma22 = ARMA(y_arma22)
res22 = arma22.fit(trend='nc', order=(2, 2))
# test CSS
arma22_css = ARMA(y_arma22)
res22css = arma22_css.fit(trend='nc', order=(2, 2), method='css')
data = sm.datasets.sunspots.load()
ar = ARMA(data.endog)
resar = ar.fit(trend='nc', order=(9, 0))
y_arma31 = arma_generate_sample([1, -.75, -.35, .25], [.1],
nsample=1000)
arma31css = ARMA(y_arma31)
res31css = arma31css.fit(order=(3, 1), method="css", trend="nc",
transparams=True)
y_arma13 = arma_generate_sample([1., -.75], [1, .25, -.5, .8],
nsample=1000)
arma13css = ARMA(y_arma13)
res13css = arma13css.fit(order=(1, 3), method='css', trend='nc')
# check css for p < q and q < p
y_arma41 = arma_generate_sample([1., -.75, .35, .25, -.3], [1, -.35],
nsample=1000)
arma41css = ARMA(y_arma41)
res41css = arma41css.fit(order=(4, 1), trend='nc', method='css')
y_arma14 = arma_generate_sample([1, -.25], [1., -.75, .35, .25, -.3],
nsample=1000)
arma14css = ARMA(y_arma14)
    res14css = arma14css.fit(order=(1, 4), trend='nc', method='css')
# ARIMA Model
from statsmodels.datasets import webuse
dta = webuse('wpi1')
wpi = dta['wpi']
mod = ARIMA(wpi, (1, 1, 1)).fit()
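    # Illustrative continuation (not part of the original demo): produce a short
    # out-of-sample forecast in the levels of wpi from the ARIMA(1,1,1) fit above
    # and print a compact summary. The step count and alpha are arbitrary choices.
    wpi_forecast, wpi_stderr, wpi_conf_int = mod.forecast(steps=8, alpha=0.05)
    print(mod.summary())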
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/tsa/statespace/tools.py | 19 | 12762 | """
Statespace Tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from statsmodels.tools.data import _is_using_pandas
from . import _statespace
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError: # pragma: no cover
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return prefix, dtype, None
prefix_dtype_map = {
's': np.float32, 'd': np.float64, 'c': np.complex64, 'z': np.complex128
}
prefix_statespace_map = {
's': _statespace.sStatespace, 'd': _statespace.dStatespace,
'c': _statespace.cStatespace, 'z': _statespace.zStatespace
}
prefix_kalman_filter_map = {
's': _statespace.sKalmanFilter, 'd': _statespace.dKalmanFilter,
'c': _statespace.cKalmanFilter, 'z': _statespace.zKalmanFilter
}
def companion_matrix(polynomial):
r"""
Create a companion matrix
Parameters
----------
polynomial : array_like, optional.
If an iterable, interpreted as the coefficients of the polynomial from
which to form the companion matrix. Polynomial coefficients are in
order of increasing degree. If an integer, the size of the companion
matrix (the polynomial coefficients are then set to zeros).
Returns
-------
companion_matrix : array
Notes
-----
Returns a matrix of the form
.. math::
\begin{bmatrix}
\phi_1 & 1 & 0 & \cdots & 0 \\
\phi_2 & 0 & 1 & & 0 \\
\vdots & & & \ddots & 0 \\
& & & & 1 \\
\phi_n & 0 & 0 & \cdots & 0 \\
\end{bmatrix}
where some or all of the :math:`\phi_i` may be non-zero (if `polynomial` is
None, then all are equal to zero).
If the coefficients provided are :math:`(c_0, c_1, \dots, c_{n})`,
then the companion matrix is an :math:`n \times n` matrix formed with the
elements in the first column defined as
:math:`\phi_i = -\frac{c_i}{c_0}, i \in 1, \dots, n`.
"""
if isinstance(polynomial, int):
n = polynomial
polynomial = None
else:
n = len(polynomial) - 1
polynomial = np.asanyarray(polynomial)
matrix = np.zeros((n, n))
idx = np.diag_indices(n - 1)
idx = (idx[0], idx[1] + 1)
matrix[idx] = 1
if polynomial is not None and n > 0:
matrix[:, 0] = -polynomial[1:] / polynomial[0]
return matrix
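# Illustrative sketch (not part of the original module): how companion_matrix is
# typically used to check an AR lag polynomial. The coefficients below are an
# arbitrary example chosen purely for illustration.
def _example_companion_matrix_usage():
    """Hedged sketch: companion form of the AR polynomial 1 - 0.5 L - 0.2 L^2."""
    import numpy as np
    C = companion_matrix([1, -0.5, -0.2])
    # the eigenvalues of the companion matrix are the inverse roots of the
    # polynomial, so stationarity corresponds to all |eigenvalues| < 1
    eigvals = np.linalg.eigvals(C)
    return C, np.all(np.abs(eigvals) < 1)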
def diff(series, k_diff=1, k_seasonal_diff=None, k_seasons=1):
r"""
Difference a series simply and/or seasonally along the zero-th axis.
Given a series (denoted :math:`y_t`), performs the differencing operation
.. math::
\Delta^d \Delta_s^D y_t
    where :math:`d =` `k_diff`, :math:`s =` `k_seasons`,
    :math:`D =` `k\_seasonal\_diff`, and :math:`\Delta` is the difference
operator.
Parameters
----------
series : array_like
The series to be differenced.
    k_diff : int, optional
        The number of simple differences to perform. Default is 1.
    k_seasonal_diff : int or None, optional
The number of seasonal differences to perform. Default is no seasonal
differencing.
k_seasons : int, optional
The seasonal lag. Default is 1. Unused if there is no seasonal
differencing.
Returns
-------
differenced : array
The differenced array.
"""
pandas = _is_using_pandas(series, None)
differenced = np.asanyarray(series) if not pandas else series
# Seasonal differencing
if k_seasonal_diff is not None:
while k_seasonal_diff > 0:
if not pandas:
differenced = (
differenced[k_seasons:] - differenced[:-k_seasons]
)
else:
differenced = differenced.diff(k_seasons)[k_seasons:]
k_seasonal_diff -= 1
# Simple differencing
if not pandas:
differenced = np.diff(differenced, k_diff, axis=0)
else:
while k_diff > 0:
differenced = differenced.diff()[1:]
k_diff -= 1
return differenced
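# Illustrative sketch (not part of the original module): one simple plus one
# seasonal difference of a short quarterly-style series. The data are arbitrary.
def _example_diff_usage():
    """Hedged sketch: simple and seasonal (lag 4) differencing with diff()."""
    import numpy as np
    series = np.arange(20, dtype=float) ** 2
    differenced = diff(series, k_diff=1, k_seasonal_diff=1, k_seasons=4)
    # the length shrinks by k_seasons (seasonal pass) plus k_diff (simple pass)
    return differenced, len(series) - len(differenced)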
def is_invertible(polynomial, threshold=1.):
r"""
Determine if a polynomial is invertible.
Requires all roots of the polynomial lie inside the unit circle.
Parameters
----------
polynomial : array_like
Coefficients of a polynomial, in order of increasing degree.
For example, `polynomial=[1, -0.5]` corresponds to the polynomial
:math:`1 - 0.5x` which has root :math:`2`.
threshold : number
Allowed threshold for `is_invertible` to return True. Default is 1.
Notes
-----
If the coefficients provided are :math:`(c_0, c_1, \dots, c_n)`, then
the corresponding polynomial is :math:`c_0 + c_1 L + \dots + c_n L^n`.
There are three equivalent methods of determining if the polynomial
represented by the coefficients is invertible:
The first method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
& = constant (1 - \lambda_1 L)
(1 - \lambda_2 L) \dots (1 - \lambda_n L)
In order for :math:`C(L)` to be invertible, it must be that each factor
:math:`(1 - \lambda_i L)` is invertible; the condition is then that
:math:`|\lambda_i| < 1`, where :math:`\lambda_i` is a root of the
polynomial.
The second method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
& = constant (L - \zeta_1 L) (L - \zeta_2) \dots (L - \zeta_3)
The condition is now :math:`|\zeta_i| > 1`, where :math:`\zeta_i` is a root
of the polynomial with reversed coefficients and
:math:`\lambda_i = \frac{1}{\zeta_i}`.
Finally, a companion matrix can be formed using the coefficients of the
polynomial. Then the eigenvalues of that matrix give the roots of the
polynomial. This last method is the one actually used.
See Also
--------
companion_matrix
"""
# First method:
# np.all(np.abs(np.roots(np.r_[1, params])) < 1)
# Second method:
# np.all(np.abs(np.roots(np.r_[1, params][::-1])) > 1)
# Final method:
eigvals = np.linalg.eigvals(companion_matrix(polynomial))
return np.all(np.abs(eigvals) < threshold)
def constrain_stationary_univariate(unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
Returns
-------
constrained : array
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
References
----------
Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = unconstrained.shape[0]
y = np.zeros((n, n), dtype=unconstrained.dtype)
r = unconstrained/((1 + unconstrained**2)**0.5)
for k in range(n):
for i in range(k):
y[k, i] = y[k - 1, i] + r[k] * y[k - 1, k - i - 1]
y[k, k] = r[k]
return -y[n - 1, :]
def unconstrain_stationary_univariate(constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
Returns
-------
unconstrained : array
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
References
----------
Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = constrained.shape[0]
y = np.zeros((n, n), dtype=constrained.dtype)
y[n-1:] = -constrained
for k in range(n-1, 0, -1):
for i in range(k):
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
r = y.diagonal()
x = r / ((1 - r**2)**0.5)
return x
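# Illustrative sketch (not part of the original module): the two transforms above
# are inverses of each other, which is how an optimizer uses them (optimize over
# unconstrained values, evaluate the likelihood at constrained ones). The input
# values below are arbitrary.
def _example_constrain_roundtrip():
    """Hedged sketch: unconstrained -> constrained -> unconstrained round trip."""
    import numpy as np
    unconstrained = np.array([1.5, -0.3, 0.2])
    constrained = constrain_stationary_univariate(unconstrained)
    recovered = unconstrain_stationary_univariate(constrained)
    # the constrained coefficients correspond to a stationary process, and the
    # round trip reproduces the original parameters up to floating-point error
    return constrained, np.allclose(recovered, unconstrained)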
def validate_matrix_shape(name, shape, nrows, ncols, nobs):
"""
Validate the shape of a possibly time-varying matrix, or raise an exception
Parameters
----------
name : str
The name of the matrix being validated (used in exception messages)
shape : array_like
The shape of the matrix to be validated. May be of size 2 or (if
the matrix is time-varying) 3.
nrows : int
The expected number of rows.
ncols : int
The expected number of columns.
nobs : int
The number of observations (used to validate the last dimension of a
time-varying matrix)
Raises
------
ValueError
If the matrix is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [2, 3]:
raise ValueError('Invalid value for %s matrix. Requires a'
' 2- or 3-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the matrix
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
if not shape[1] == ncols:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' columns, got %d' % (name, ncols, shape[1]))
# If we don't yet know `nobs`, don't allow time-varying arrays
if nobs is None and not (ndim == 2 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s matrix: time-varying'
' matrices cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 3 and nobs is not None and not shape[-1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' matrix. Requires shape (*,*,%d), got %s' %
(name, nobs, str(shape)))
def validate_vector_shape(name, shape, nrows, nobs):
"""
Validate the shape of a possibly time-varying vector, or raise an exception
Parameters
----------
name : str
The name of the vector being validated (used in exception messages)
shape : array_like
The shape of the vector to be validated. May be of size 1 or (if
the vector is time-varying) 2.
nrows : int
The expected number of rows (elements of the vector).
nobs : int
The number of observations (used to validate the last dimension of a
time-varying vector)
Raises
------
ValueError
If the vector is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [1, 2]:
raise ValueError('Invalid value for %s vector. Requires a'
' 1- or 2-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the vector
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s vector: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
# If we don't yet know `nobs`, don't allow time-varying arrays
if nobs is None and not (ndim == 1 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s vector: time-varying'
' vectors cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 2 and not shape[1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' vector. Requires shape (*,%d), got %s' %
(name, nobs, str(shape)))
| bsd-3-clause |
codingpoets/tigl | misc/math-scripts/ms_componentSegmentGeom.py | 2 | 19336 | #
# Copyright (C) 2007-2013 German Aerospace Center (DLR/SC)
#
# Created: 2012-12-17 Martin Siggel <Martin.Siggel@dlr.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @file ms_componentSegmentGeom.py
# @brief Implements coordinate transforms on the component segment geometry
#
from numpy import *
from ms_optAlgs import *
from ms_segmentGeometry import *
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class Elongation:
Left, No, Right, LeftRight = range(4)
class ComponentSegment:
def __init__(self):
self.segments = []
def addSegment(self, p1, p2, p3, p4):
self.segments.append(ComponentSegmentGeometry(p1,p2,p3,p4))
self.__calcEtaRanges()
def __calcEtaRanges(self):
size = len(self.segments)
# calculate total projected length
len_tot = 0
for item in self.segments:
len_tot = len_tot + item.calcProjectedLeadingEdgeLength(Elongation.No)
len_tot = len_tot + self.segments[0].calcProjectedLeadingEdgeLength(Elongation.Left) \
- self.segments[0].calcProjectedLeadingEdgeLength(Elongation.No) \
+ self.segments[size-1].calcProjectedLeadingEdgeLength(Elongation.Right) \
- self.segments[size-1].calcProjectedLeadingEdgeLength(Elongation.No)
#calculate inner eta of first segment
etastart = (self.segments[0].calcProjectedLeadingEdgeLength(Elongation.Left) \
- self.segments[0].calcProjectedLeadingEdgeLength(Elongation.No) )/len_tot
for item in self.segments:
etastop = etastart + item.calcProjectedLeadingEdgeLength(Elongation.No)/len_tot
item.setLeadingEdgeEtas(etastart, etastop)
etastart = etastop
def draw(self, axis):
for item in self.segments:
item.drawSegment(axis)
def calcPoint(self, eta, xsi):
for item in self.segments:
if item.checkCoordValidity(eta,xsi) == True:
return item.calcCSPoint(eta,xsi)
def drawPoint(self, axis, eta, xsi):
point = self.calcPoint(eta, xsi)
axis.plot([point[0]], [point[1]], [point[2]],'rx')
class ComponentSegmentGeometry:
def __init__(self, p1, p2, p3, p4, etamin = 0, etamax = 1):
self.setPoints(p1, p2, p3, p4, etamin, etamax)
def setPoints(self, p1, p2, p3, p4, etamin, etamax):
self.__p1 = p1
self.__p2 = p2
self.__p3 = p3
self.__p4 = p4
self.__etamin = etamin
self.__etamax = etamax
sv = p2 - p1
sh = p4 - p3
# normal vector of plane
n = array([0, -sv[1], -sv[2]])
## calculate extended leading edge and trailing edge points, this has to be done
# only once per wing segment.
#calculate point where le intersects plane
avo = dot(p4-p1,n) / dot(p2-p1,n)
if avo > 1:
self.__p2p = avo*sv + p1
self.__p4p = p4
else:
self.__p2p = p2;
# check trailing edge
aho = dot(p2-p3,n) / dot(p4-p3,n)
assert aho >= 1
self.__p4p = sh*aho + p3
# now the inner section, the normal vector should be still the same
avi = dot(p3-p1,n)/dot(p2-p1,n)
if avi < 0:
# leading edge has to be extended
self.__p3p = p3
self.__p1p = avi*sv + p1
else:
self.__p1p = p1;
ahi = dot(p1-p3,n)/dot(p4-p3,n)
self.__p3p = p3 + ahi*sh
assert ahi <= 0
#calculate eta values of segment edges, these values define also, when a given cs coordinate is outside the wing segment
self.__eta1 = dot(p1-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
self.__eta2 = dot(p2-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
self.__eta3 = dot(p3-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
self.__eta4 = dot(p4-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
# sets the eta range from inner segment tip to the outer tip
def setEtaMinMax(self, etamin, etamax):
self.__etamax = etamax
self.__etamin = etamin
def getEtaMinMax(self):
return self.__etamin, self.__etamax
# sets the eta range of the leading edge. if e.g. the trailing edge is longer than
# the leading edge, this makes a difference to setEtaMinMax
def setLeadingEdgeEtas(self, eta_in, eta_out):
# we need to scale etamax, etamin accordingly
etamin = (self.__eta2*eta_in - self.__eta1*eta_out)/(self.__eta2 - self.__eta1)
etamax = etamin + (eta_out - eta_in)/(self.__eta2 - self.__eta1)
self.setEtaMinMax(etamin, etamax)
  # only valid with calcCSPoint (not calcCSPoint2/3)
def checkCoordValidity(self, eta, xsi):
if eta < self.__etamin or eta > self.__etamax or xsi < 0 or xsi > 1:
return False
else:
actetamin = (1-xsi)*self.__eta1 + xsi*self.__eta3
actetamax = (1-xsi)*self.__eta2 + xsi*self.__eta4
if (eta>= actetamin) and (eta <= actetamax):
return True
else:
return False
def calcProjectedLeadingEdgeLength(self, elongation):
p1 = self.__p1;
p2 = self.__p2;
if elongation == Elongation.Left:
p1 = self.__p1p
elif elongation == Elongation.Right:
p2 = self.__p2p
elif elongation == Elongation.LeftRight:
p1 = self.__p1p
p2 = self.__p2p
# project leading edge into the z-y plane
vProj = array([0, 1, 1])
return linalg.norm(vProj*(p2-p1))
def calcCSPoint(self, eta, xsi):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
#calculate eta values at given xsi
eta1p = self.__eta1*(1-xsi) + self.__eta3*xsi
eta2p = self.__eta2*(1-xsi) + self.__eta4*xsi
pbeg = self.__p1*(1-xsi) + self.__p3*xsi
pend = self.__p2*(1-xsi) + self.__p4*xsi
p = pbeg + (pend-pbeg)*(eta_ - eta1p)/(eta2p-eta1p)
return p
def calcCSPoint2(self, eta, xsi):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
p = self.__p1p + (self.__p2p-self.__p1p)*(eta_);
a = -self.__p1+self.__p2;
b = -self.__p1+self.__p3;
c = self.__p1-self.__p2-self.__p3+self.__p4;
d = self.__p1;
n = array([0, -a[1], -a[2]])
# calc some constants
a1 = dot(p - d, n);
a2 = -dot(b, n);
a3 = dot(a, n);
a4 = dot(c, n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# diff( eta(xi). xi), tangent in eta xsi space
alp = lambda beta: (a2*a3 - a1*a4)/((a3 + a4*beta)**2);
# 3d intersection curve, parametrized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d,ones(size(beta)));
# tangent in 3d space
cup = lambda beta: (outer(a,ones(size(beta))) + outer(c,beta))*outer(ones(3),alp(beta)) + outer(c,al(beta)) + outer(b,ones(size(beta)));
#norm of tangent curve
f = lambda beta: sqrt(sum(cup(beta)**2.,0));
# we want to integrate int f(x)*dx, we do gaussian method
# substitution of f to range [-1,1]
g = lambda x,beta: beta/2.*f((x+1.)*beta/2.);
# gauss x points 5th order, this is really some dark magic ;)
x = array([9.06179845938664e-01,
5.38469310105683e-01,
0.00000000000000e+00,
-5.38469310105683e-01,
-9.06179845938664e-01])
# gauss weights
w = array([2.36926885056189e-01,
4.78628670499366e-01,
5.68888888888889e-01,
4.78628670499366e-01,
2.36926885056189e-01])
# calculate total length of iso eta curve
ltot = dot(g(x,1.),w);
#now we use Newton Raphson to find beta, such that F(beta) == xi*ltot
F = lambda beta: dot(g(x,beta),w)/ltot - xsi
beta = xsi
diff = F(beta)
while abs(diff) > 1e-12:
dir = -diff/(f(beta)/ltot)
beta = beta + dir
diff = F(beta)
return cu(beta)
# alternative implementation, where xsi resembles the relative coordinate
# between intersection point of the leading edge and the intersection point
# of the trailing edge. This intermediate point will then be projected onto
# the true intersection curve
def calcCSPoint3(self, eta, xsi):
debug = True
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
p = self.__p1p + (self.__p2p-self.__p1p)*(eta_);
a = -self.__p1+self.__p2;
b = -self.__p1+self.__p3;
c = self.__p1-self.__p2-self.__p3+self.__p4;
d = self.__p1;
n = array([0, -a[1], -a[2]])
# calc some constants
a1 = dot(p - d, n);
a2 = -dot(b, n);
a3 = dot(a, n);
a4 = dot(c, n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# diff( eta(xi). xi), tangent in eta xsi space
alp = lambda beta: (a2*a3 - a1*a4)/((a3 + a4*beta)**2);
# 3d intersection curve, parametrized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d,ones(size(beta)));
# tangent in 3d space
cup = lambda beta: (outer(a,ones(size(beta))) + outer(c,beta))*outer(ones(3),alp(beta)) + outer(c,al(beta)) + outer(b,ones(size(beta)));
# calculate intersection with leading and trailing edge
pbeg = cu(0)[:,0]
pend = cu(1)[:,0]
reflen = linalg.norm(pbeg-pend);
# go along this line
pact = (1-xsi)*pbeg + xsi*pend
# project this point onto intersection curve i.e. find beta so that (cu(beta) - pact) * (pbeg-pend) == 0
# as cu(beta) is not linear, we try to find the solution with newton raphson method
f = lambda beta: dot(cu(beta)[:,0] - pact, pend - pbeg)/reflen
fp = lambda beta: dot(cup(beta)[:,0], pend - pbeg)/reflen
beta = xsi
diff = f(beta)
iter = 0;
if debug: print 'Iter:', iter, ' Error=', abs(diff), ' @ Beta=' , beta
while abs(diff) > 1e-12 and iter < 20:
iter += 1
dir = -diff/(fp(beta))
# maybe we need a linesearch here...
beta = beta + dir
diff = f(beta)
if debug: print 'Iter:', iter, ' Error=', abs(diff), '@ Beta=' , beta
if iter >= 20:
print "ERROR: could not project intersection curve onto line"
if debug == True:
myb = linspace(-1.,1., 1000)
val = 0*myb
for i in range(len(myb)):
val[i] = f(myb[i]);
fig = plt.figure()
ax2 = fig.gca()
ax2.plot(myb, val)
# calculate result
point = cu(beta)
# here we got for free our segment coordinates also, which are
# eta_s = al(beta), xsi_s = beta
return point
# calculates the tangents in eta and xsi direction at the given point
def calcCSPointTangents(self, eta, xsi):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
deta_ = 1. /(self.__etamax - self.__etamin)
#calculate eta values at given xsi
eta1p = self.__eta1*(1-xsi) + self.__eta3*xsi
eta2p = self.__eta2*(1-xsi) + self.__eta4*xsi
pbeg = self.__p1*(1-xsi) + self.__p3*xsi
pend = self.__p2*(1-xsi) + self.__p4*xsi
# calculate derivatives
deta1p = self.__eta3 - self.__eta1;
deta2p = self.__eta4 - self.__eta2;
dpbeg = self.__p3 - self.__p1;
dpend = self.__p4 - self.__p2;
J = zeros((3,2))
J[:,0] = (pend-pbeg)/(eta2p-eta1p)*deta_;
J[:,1] = dpbeg + (dpend-dpbeg)*(eta_ - eta1p)/(eta2p-eta1p) + (pend - pbeg)*(-deta1p/(eta2p-eta1p) - (eta_ - eta1p)/((eta2p-eta1p)**2)*(deta2p - deta1p) );
return J
def calcCSPointNormal(self, eta, xsi):
J = self.calcCSPointTangents(eta, xsi);
normal = cross(J[:,1],J[:,0])
return normal/linalg.norm(normal)
def __calcCSHessian(self, eta, xsi, p):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
deta_ = 1. /(self.__etamax - self.__etamin)
#calculate eta values at given xsi
eta1p = self.__eta1*(1-xsi) + self.__eta3*xsi
eta2p = self.__eta2*(1-xsi) + self.__eta4*xsi
pbeg = self.__p1*(1-xsi) + self.__p3*xsi
pend = self.__p2*(1-xsi) + self.__p4*xsi
# calculate derivatives
deta1p = self.__eta3 - self.__eta1;
deta2p = self.__eta4 - self.__eta2;
dpbeg = self.__p3 - self.__p1;
dpend = self.__p4 - self.__p2;
# helper variables and their derivatives to xsi
hv1 = pend - pbeg;
dhv1 = dpend - dpbeg;
h2 = eta_ - eta1p;
dh2 = - deta1p;
h3 = 1/(eta2p-eta1p);
dh3 = -1/(eta2p-eta1p)**2*(deta2p-deta1p);
d2h3 = 2/(eta2p-eta1p)**3*(deta2p-deta1p)**2;
# p(eta, xsi)
p_ = pbeg + hv1*h2*h3;
# first derivative, dp(eta, xsi)
J1 = hv1*h3*deta_;
J2 = dpbeg + dhv1*h2*h3 + hv1*(dh2*h3 + h2 * dh3);
# second order derivative d2p(eta, xsi), H11 is zero!
H21 = (dhv1*h3 + hv1*dh3)*deta_;
H22 = dhv1*(dh2*h3 + h2*dh3)*2 + hv1*( 2*dh2*dh3 + h2*d2h3 );
# finally applying for the object function
H = zeros((2,2))
H[0,0] = dot(J1,J1)
H[0,1] = dot(J1,J2) + dot(p_ - p, H21)
H[1,1] = dot(J2,J2) + dot(p_ - p, H22)
H[1,0] = H[0,1];
return 2.*H
def projectOnCS(self, p):
opttype = 'newton'
# calculate initial guess, project onto leading edge and inner section
eta = dot(p - self.__p1p, self.__p2p - self.__p1p)/( linalg.norm(self.__p2p - self.__p1p)**2)
xsi = dot(p - self.__p1, self.__p3 - self.__p1)/( linalg.norm(self.__p3 - self.__p1)**2)
# scale according to local eta range
eta = eta*(self.__etamax-self.__etamin) + self.__etamin
x = array([eta,xsi])
of = lambda x: linalg.norm(self.calcCSPoint(x[0], x[1])-p)**2;
ograd = lambda x: 2.* dot(self.calcCSPointTangents(x[0], x[1]).transpose(), self.calcCSPoint(x[0], x[1])-p);
#ograd = lambda x: ms_numGrad(of, x, 1e-9)
ohess = lambda x: self.__calcCSHessian( x[0], x[1], p);
#ohess = lambda x: ms_numHess(ograd, x, 1e-9)
fig2 = plt.figure();
X, Y = meshgrid(arange(self.__etamin-0.2, self.__etamax+0.2, 0.02), arange(-0.2, 1.2, 0.02))
Z = zeros(X.shape);
for i in range(0,size(X,0)):
for j in range(0,size(X,1)):
Z[i,j] = of([X[i,j], Y[i,j]])
plt.imshow(Z,origin='lower', extent=[self.__etamin-0.2, self.__etamax+0.2,-0.2,1.2], aspect=1./1.)
plt.colorbar();
plt.contour(X,Y,Z)
plt.title('Objective function opt:'+opttype)
plt.xlabel('eta');
plt.ylabel('xsi');
if opttype == 'bfgs':
x_= ms_optQuasiNewton(of,ograd, x, 'bfgs')
elif opttype == 'sr1':
x_= ms_optQuasiNewton(of,ograd, x, 'sr1')
elif opttype == 'gradient':
x_= ms_optSteepestDescent(of,ograd, x)
elif opttype == 'cg':
x_= ms_optCG(of,ograd, x, 'fr')
else:
x_= ms_optNewton(of,ograd,ohess,x)
eta = x_[0]; xsi = x_[1];
return (eta, xsi)
def projectOnCS3(self, p):
segment = SegmentGeometry(self.__p1, self.__p2, self.__p3, self.__p4)
# get the projection point on the, here we get some numerical uncertainty
# if we'd knew that p is already on the plane, we could directly use p
alpha, beta = segment.projectOnSegment(p);
return self.convertAlphaBetaToEtaXsi(alpha, beta)
#return self.convertXYZtoEtaXsi(p[:,0])
def convertAlphaBetaToEtaXsi(self, alpha, beta):
segment = SegmentGeometry(self.__p1, self.__p2, self.__p3, self.__p4)
p_proj = segment.getPoint(alpha, beta)[:,0]
return self.convertXYZtoEtaXsi(p_proj)
# we must ensure that p_proj already lies on the segment, if unsure use projectOnCS3
def convertXYZtoEtaXsi(self, p_proj):
# project leading edge into the z-y plane
vProj = array([0, 1, 1])
n = vProj*(self.__p2p-self.__p1p)
# calc eta koordinate of that point
eta = (dot(p_proj,n)*(self.__eta2 - self.__eta1) + dot(self.__p2,n)*self.__eta1 - dot(self.__p1,n)*self.__eta2) \
/ dot(self.__p2 - self.__p1, n);
# intersection point of plane with leading edge
p_ = 1./(self.__eta2 - self.__eta1)*( (self.__eta2 - eta)*self.__p1 + (eta - self.__eta1)*self.__p2 )
a = -self.__p1+self.__p2;
b = -self.__p1+self.__p3;
c = self.__p1-self.__p2-self.__p3+self.__p4;
d = self.__p1;
# calc some constants
a1 = dot(p_ - d, n);
a2 = -dot(b, n);
a3 = dot(a, n);
a4 = dot(c, n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# 3d intersection curve, parametrized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d,ones(size(beta)));
# now we have to find the xi coordinate, i.e. (pbeg + (pend-pbeg)*xi - p_proj)*(pbeg-pend) == 0
pbeg = cu(0)[:,0]
pend = cu(1)[:,0]
xsi = dot(p_proj - pbeg, pbeg-pend)/dot(pend-pbeg, pbeg-pend);
return eta, xsi
def calcCSIsoXsiLine(self, xsi, extentToGeometry = False):
etamin = self.__etamin
etamax = self.__etamax
if not extentToGeometry:
etamin = xsi * (self.__eta3 - self.__eta1) + self.__eta1
etamin = etamin * (self.__etamax - self.__etamin) + self.__etamin
etamax = xsi * (self.__eta4 - self.__eta2) + self.__eta2
etamax = etamax * (self.__etamax - self.__etamin) + self.__etamin
P1 = self.calcCSPoint(etamin,xsi);
P2 = self.calcCSPoint(etamax,xsi);
return ([P1[0], P2[0]], [P1[1], P2[1]], [P1[2], P2[2]] )
def calcCSIsoEtaLine(self, eta, extentToGeometry = False, npoints = 30):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
# calculate bilinear vectors
a = -self.__p1 + self.__p2;
b = -self.__p1 + self.__p3;
c = self.__p1 - self.__p2 - self.__p3 + self.__p4;
d = self.__p1;
# leading edge vector
sv = self.__p2 - self.__p1
# normal vector of intersection plane
n = array([0, -sv[1], -sv[2]])
# calculate eta point on leading edge
p_ = self.__p1p*(1-eta_) + eta_*self.__p2p;
a1 = dot(p_-d,n);
a2 = -dot(b,n);
a3 = dot(a,n);
a4 = dot(c,n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# 3d intersection curve, parameterized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d, ones(size(beta)));
xsistart = 0 if (eta_ <= self.__eta2) else (eta_ - self.__eta2)/(self.__eta4 - self.__eta2)
xsistop = 1 if (eta_ <= self.__eta4) else (eta_ - self.__eta2)/(self.__eta4 - self.__eta2)
xsistart = xsistart if (eta_ >= self.__eta1) else (eta_ - self.__eta1)/(self.__eta3 - self.__eta1)
xsistop = xsistop if (eta_ >= self.__eta3) else (eta_ - self.__eta1)/(self.__eta3 - self.__eta1)
if extentToGeometry:
xsi = linspace(0,1,npoints)
else:
xsi = linspace(xsistart,xsistop,npoints)
points = cu(xsi);
X = points[0,:];
Y = points[1,:];
Z = points[2,:];
return (X,Y,Z)
def drawSegment(self, axis, extentToGeometry = False):
start = math.ceil (self.__etamin*10.)/10.
stop = math.floor(self.__etamax*10.)/10.
alpha = start
while alpha <= stop:
X,Y,Z = self.calcCSIsoEtaLine(alpha, extentToGeometry)
axis.plot(X,Y,Z,'g')
alpha = alpha + 0.1
beta = 0.0
while beta <= 1.0:
X,Y,Z = self.calcCSIsoXsiLine(beta, extentToGeometry)
axis.plot(X,Y,Z,'g');
beta = beta + 0.1
lw = 2
axis.plot([self.__p1[0], self.__p2[0]], [self.__p1[1], self.__p2[1]], [self.__p1[2], self.__p2[2]],'b',linewidth=lw);
axis.plot([self.__p1[0], self.__p3[0]], [self.__p1[1], self.__p3[1]], [self.__p1[2], self.__p3[2]],'b',linewidth=lw);
axis.plot([self.__p4[0], self.__p2[0]], [self.__p4[1], self.__p2[1]], [self.__p4[2], self.__p2[2]],'b',linewidth=lw);
axis.plot([self.__p3[0], self.__p4[0]], [self.__p3[1], self.__p4[1]], [self.__p3[2], self.__p4[2]],'b',linewidth=lw);
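# Illustrative sketch (not part of the original script): a minimal round trip on
# a single component segment. The four corner points and the (eta, xsi) values
# below are arbitrary choices for illustration; plotting is deliberately left out.
if __name__ == '__main__':
  p1 = array([0.0, 0.0, 0.0])   # inner leading edge
  p2 = array([0.5, 4.0, 0.2])   # outer leading edge
  p3 = array([2.0, 0.0, 0.0])   # inner trailing edge
  p4 = array([2.5, 4.0, 0.2])   # outer trailing edge
  segment = ComponentSegmentGeometry(p1, p2, p3, p4)
  # map component segment coordinates to a 3d point and back again; for this
  # planar quad the original (eta, xsi) should be recovered
  point = segment.calcCSPoint(0.5, 0.25)
  eta, xsi = segment.convertXYZtoEtaXsi(point)
  print 'point on segment:', point
  print 'recovered (eta, xsi):', eta, xsi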
| apache-2.0 |
bertdecoensel/noysim | noysim/viewer.py | 1 | 28081 | # Noysim -- Noise simulation tools for Aimsun.
# Copyright (c) 2010-2011 by Bert De Coensel, Ghent University & Griffith University.
#
# Classes for sending and viewing noise levels in real-time
import os
import sys
import socket
import threading
import time
import random
import msvcrt
if not hasattr(sys, 'frozen'):
import wxversion
wxversion.select('2.8-msw-unicode') # version of wxPython
import wx
from wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar
import numpy
import pylab
USERPYC = True # if set to False, low level sockets are used
if USERPYC:
try:
# check if rpyc is installed
import rpyc
from rpyc.utils.server import ThreadedServer
from rpyc.utils.classic import DEFAULT_SERVER_PORT
from rpyc.utils.registry import UDPRegistryClient
from rpyc.core import SlaveService
except:
# revert to using low level sockets
USERPYC = False
raise Exception('rpyc has to be installed')
import version
#---------------------------------------------------------------------------------------------------
# Parameters
#---------------------------------------------------------------------------------------------------
# general parameters
NAME = '%s %s Viewer' % (version.name.capitalize(), version.version)
ABOUT = NAME + '\n\n' + version.copyright.replace(', ', '\n') + '\n' + version.email
# communication with level viewer
RPYCTHREAD = None # global level thread variable (needed to circumvent the rpyc service factory)
HOST = 'localhost'
PORT = 50007
TIMEOUT = 0.01
SLEEP = 0.001
BUFSIZE = 4096
# timing parameters
REDRAWTIME = 100 # number of milliseconds between redraws
FLASHTIME = 1500 # duration of messages on the status bar, in milliseconds
# visualisation parameters
DPI = 100 # dots per inch for plotting and saving
FIGSIZE = (3.0, 3.0) # size of plotting canvas in inches (defaults to 300x300 pixels)
FONTSIZE = 8 # size of font of labels
BGCOLOR = 'black'
GRIDCOLOR = 'gray'
LINECOLOR = 'yellow'
LINEWIDTH = 1
# axes parameters
SPININC = 5.0 # increment of spin controls
XMIN = 10.0 # minimal x-axis range width
XWIDTH = 30.0 # initial value of x-axis range width
YMIN = (0.0, 10.0) # minimal y-axis low and height
YRANGE = (30.0, 60.0) # initial values of y-axis low and height
MARGIN = 1.0 # margin for auto range of levels
# test parameters
TESTDT = 0.5 # simulation timestep in seconds
TESTSLEEP = 0.2 # time between level updates
TESTLOCS = ['(1.00,2.00,3.00)', '(4.00,5.00,6.00)'] # locations of test receivers
randomLevel = lambda: 40.0 + 30.0*random.random() # function that generates a random sound level
#---------------------------------------------------------------------------------------------------
# Communication from plugin to viewer
#---------------------------------------------------------------------------------------------------
class LevelBuffer(object):
""" base interface for sending levels to the viewer, implementing the one-way communication protocol
types of messages:
- command: 'clear'
- levels: 't;loc:level;loc:level'
"""
def __init__(self, host = HOST, port = PORT, active = True, sleep = 0, verbose = False):
object.__init__(self)
self.host = host
self.port = port
self.queue = [] # queue of messages to send
self.active = active # if False, nothing is sent
self.sleep = sleep/1000.0 # time to sleep (in seconds) after sending levels (to slow down a simulation)
self.verbose = verbose # if True, debug code is printed
def sendLevels(self, t, levels):
""" send a series of levels at a particular time at different locations (dict of location:level) """
if self.active:
message = ('%.2f;' % t) + ';'.join([('%s:%.2f' % (str(loc), level)) for loc, level in levels.iteritems()])
self.queue.append(message)
self.flush()
if self.sleep > 0.0:
time.sleep(self.sleep)
def sendClear(self):
""" send a 'clear' message """
if self.active:
message = 'clear'
self.queue.append(message)
self.flush()
def send(self, message):
""" should send a single message string to the viewer (raise an error if not succesful) """
raise NotImplementedError
def flush(self):
""" try to send all message strings in the queue to the viewer """
while (len(self.queue) > 0) and (self.active == True):
message = self.queue[0]
try:
if self.verbose:
print 'trying to send message "%s"' % message
self.send(message)
# remove message from queue
del self.queue[0]
if self.verbose:
          print 'sending successful'
except:
if self.verbose:
print 'sending failed - aborting - length of queue: %d' % len(self.queue)
break
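# Illustration (added sketch, not part of the original protocol definition): the
# receiver location and level below are hypothetical, and createLevelBuffer is the
# factory defined further down in this module.
#   buf = createLevelBuffer(verbose = True)
#   buf.sendClear()                                  # queues the message 'clear'
#   buf.sendLevels(1.5, {'(1.00,2.00,3.00)': 55.3})  # queues '1.50;(1.00,2.00,3.00):55.30'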
class SocketLevelBuffer(LevelBuffer):
""" implement the level buffer using low level sockets """
def __init__(self, *args, **kwargs):
LevelBuffer.__init__(self, *args, **kwargs)
def send(self, message):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
s.sendall(message)
s.close()
class RPyCLevelBuffer(LevelBuffer):
""" implement the level buffer using Remote Python Calls (RPyC) """
def __init__(self, *args, **kwargs):
LevelBuffer.__init__(self, *args, **kwargs)
def send(self, message):
conn = rpyc.classic.connect('localhost')
conn.root.processMessage(message)
conn.close()
def createLevelBuffer(*args, **kwargs):
""" create a level buffer according to the defined protocol """
if USERPYC:
return RPyCLevelBuffer(*args, **kwargs)
else:
return SocketLevelBuffer(*args, **kwargs)
#---------------------------------------------------------------------------------------------------
# Viewer thread for receiving levels
#---------------------------------------------------------------------------------------------------
VIEWERLOCK = threading.Lock()
class BaseLevelThread(threading.Thread):
""" base interface for a thread for receiving levels """
def __init__(self):
threading.Thread.__init__(self)
self.active = True # set this to false for the thread to stop
self.clear()
def clear(self):
""" clear all data """
VIEWERLOCK.acquire()
self.data = {} # dict with received levels, for each receiver location
self.times = [] # list with times
VIEWERLOCK.release()
def locations(self):
""" return the receiver locations """
VIEWERLOCK.acquire()
result = self.data.keys()[:]
VIEWERLOCK.release()
return result
def levels(self, loc):
""" return the times and levels at the given location """
VIEWERLOCK.acquire()
result = (numpy.asarray(self.times).copy(), numpy.asarray(self.data[loc]).copy())
VIEWERLOCK.release()
return result
class DummyLevelThread(BaseLevelThread):
""" dummy interface for receiving levels, which adds levels at regular instances in time """
def __init__(self, dt = TESTDT, sleep = TESTSLEEP, locs = TESTLOCS):
BaseLevelThread.__init__(self)
self.dt = dt
self.sleep = sleep
self.locs = locs
def run(self):
""" instantiate the server """
print 'thread started...'
t = 0.0
while self.active:
t += self.dt
VIEWERLOCK.acquire()
self.times.append(t)
for loc in self.locs:
if not loc in self.data:
self.data[loc] = []
level = randomLevel()
self.data[loc].append(level)
        print 'level received successfully: time %.2fs, %s, %.2f dB' % (t, loc, level)
VIEWERLOCK.release()
time.sleep(self.sleep)
class ViewerLevelThread(BaseLevelThread):
""" interface for receiving levels, as a thread that runs a server which listens to new levels """
def __init__(self, frame = None, host = HOST, port = PORT, verbose = False):
BaseLevelThread.__init__(self)
self.frame = frame # frame to which the thread is connected
self.host = host
self.port = port
self.verbose = verbose # if True, debug code is printed
def processMessage(self, message):
""" process an incoming message """
if message == '':
pass
elif message == 'clear':
self.clear()
# clear the frame if applicable
if self.frame != None:
self.frame.clear_choices()
self.frame.clear_plot()
if self.verbose:
print 'levels cleared'
else:
# parse the incoming message
tokens = message.split(';')
t = float(tokens[0])
levels = []
for token in tokens[1:]:
loc, level = token.split(':')
level = float(level)
levels.append((loc, level))
      # when parsing is successful, update the data
if (len(self.times) > 0) and (t < self.times[-1]):
if self.verbose:
print 'discarding non-chronological levels: %s' % message
else:
VIEWERLOCK.acquire()
self.times.append(t)
for loc, level in levels:
if not loc in self.data:
self.data[loc] = []
self.data[loc].append(level)
if self.verbose:
          print 'level received successfully: time %.2fs, %s, %.2f dB' % (t, loc, level)
VIEWERLOCK.release()
class SocketViewerLevelThread(ViewerLevelThread):
""" implementation of viewer level thread using low level sockets """
def __init__(self, *args, **kwargs):
ViewerLevelThread.__init__(self, *args, **kwargs)
def run(self):
""" instantiate the server """
if self.verbose:
print 'thread started...'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.host, self.port))
s.listen(1)
while self.active:
# wait for a connection from the plugin
try:
s.settimeout(TIMEOUT)
conn, addr = s.accept()
s.settimeout(None)
except:
time.sleep(SLEEP)
continue
# when there is a connection, fetch the message
if self.verbose:
print 'connection established'
data = ''
try:
while True:
temp = conn.recv(BUFSIZE)
if not temp:
break
data += temp
conn.close()
except:
if self.verbose:
print 'socket error, so skipping message'
# update the levels
try:
self.processMessage(data)
except:
if self.verbose:
print 'error with received message: "%s"' % data
s.close()
if USERPYC:
class RPyCViewerService(SlaveService):
""" service for managing received messages using Remote Python Calls (RPyC) """
def __init__(self, conn):
SlaveService.__init__(self, conn)
def exposed_processMessage(self, message):
""" send a message to the parent thread for processing """
global RPYCTHREAD
RPYCTHREAD.processMessage(message)
class RPyCViewerLevelThread(ViewerLevelThread):
""" implementation of viewer level thread using Remote Python Calls (RPyC) """
def __init__(self, *args, **kwargs):
ViewerLevelThread.__init__(self, *args, **kwargs)
def run(self):
""" instantiate the server """
if self.verbose:
print 'thread started...'
global RPYCTHREAD
RPYCTHREAD = self
self.server = ThreadedServer(RPyCViewerService, port = DEFAULT_SERVER_PORT, auto_register = False, registrar = UDPRegistryClient())
self.server.start()
def join(self):
self.server.close()
ViewerLevelThread.join(self)
def createViewerLevelThread(*args, **kwargs):
""" create a viewer level thread according to the defined protocol """
if USERPYC:
return RPyCViewerLevelThread(*args, **kwargs)
else:
return SocketViewerLevelThread(*args, **kwargs)
#---------------------------------------------------------------------------------------------------
# Utility GUI controls
#---------------------------------------------------------------------------------------------------
class XAxisRangeBox(wx.Panel):
""" panel for adjusting x-axis range """
def __init__(self, parent, ID, minvalue = XMIN, initvalue = XWIDTH, increment = SPININC):
wx.Panel.__init__(self, parent, ID)
self.minvalue = minvalue
self.value = initvalue # initial x-axis range width (in sliding mode)
# controls
self.radio_full = wx.RadioButton(self, -1, label = 'Full range', style = wx.RB_GROUP)
self.radio_slide = wx.RadioButton(self, -1, label = 'Sliding')
self.slide_width = FloatSpin(self, -1, size = (50, -1), digits = 0, value = self.value, min_val = minvalue, increment = increment)
self.slide_width.GetTextCtrl().SetEditable(False)
# event bindings
self.Bind(wx.EVT_UPDATE_UI, self.on_update_radio_buttons, self.radio_full)
self.Bind(EVT_FLOATSPIN, self.on_float_spin, self.slide_width)
# layout
box = wx.StaticBox(self, -1, 'X-axis')
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
slide_box = wx.BoxSizer(wx.HORIZONTAL)
slide_box.Add(self.radio_slide, flag=wx.ALIGN_CENTER_VERTICAL)
slide_box.Add(self.slide_width, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_full, 0, wx.ALL, 10)
sizer.Add(slide_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_radio_buttons(self, event):
""" called when the radio buttons are toggled """
self.slide_width.Enable(self.radio_slide.GetValue())
def on_float_spin(self, event):
""" called when the sliding mode spinbox is changed """
self.value = self.slide_width.GetValue()
def is_full(self):
""" return True if full range is checked """
return self.radio_full.GetValue()
class YAxisRangeBox(wx.Panel):
""" panel for adjusting y-axis range """
def __init__(self, parent, ID, minvalue = YMIN, initvalue = YRANGE, increment = SPININC):
wx.Panel.__init__(self, parent, ID)
self.value = initvalue # initial y-axis range (in manual mode), i.e. (min, max-min)
# controls
self.radio_auto = wx.RadioButton(self, -1, label = 'Auto', style = wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1, label = 'Manual')
self.manual_min = FloatSpin(self, -1, size = (50, -1), digits = 0, value = self.value[0], min_val = minvalue[0], increment = increment)
self.manual_min.GetTextCtrl().SetEditable(False)
self.manual_width = FloatSpin(self, -1, size = (50, -1), digits = 0, value = self.value[1], min_val = minvalue[1], increment = increment)
self.manual_width.GetTextCtrl().SetEditable(False)
# event bindings
self.Bind(wx.EVT_UPDATE_UI, self.on_update_radio_buttons, self.radio_auto)
self.Bind(EVT_FLOATSPIN, self.on_float_spin, self.manual_min)
self.Bind(EVT_FLOATSPIN, self.on_float_spin, self.manual_width)
# layout
box = wx.StaticBox(self, -1, 'Y-axis')
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_min, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_width, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_radio_buttons(self, event):
""" called when the radio buttons are toggled """
toggle = self.radio_manual.GetValue()
self.manual_min.Enable(toggle)
self.manual_width.Enable(toggle)
def on_float_spin(self, event):
""" called when one of the manual mode spinboxes is changed """
self.value = (self.manual_min.GetValue(), self.manual_width.GetValue())
def is_auto(self):
""" return True if auto range is checked """
return self.radio_auto.GetValue()
#---------------------------------------------------------------------------------------------------
# Viewer frame class
#---------------------------------------------------------------------------------------------------
class ViewerFrame(wx.Frame):
""" main frame of the viewer application """
def __init__(self, test = False):
wx.Frame.__init__(self, None, -1, NAME)
self.paused = False
self.locations = []
# creation of controls
self.create_menu()
self.create_status_bar()
self.create_main_panel()
# timer for redrawing
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(REDRAWTIME)
# handle closing the frame
self.Bind(wx.EVT_CLOSE, self.on_exit, self)
# manage window style (always on top or not)
self.wstyle = self.GetWindowStyle()
self.SetWindowStyle(self.wstyle | wx.STAY_ON_TOP)
# coordination with data server
if test:
self.thread = DummyLevelThread()
else:
self.thread = createViewerLevelThread(frame = self)
self.thread.start()
def create_menu(self):
""" construction of menu bar """
self.menubar = wx.MenuBar()
# File menu
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, '&Save plot\tCtrl-S')
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, 'E&xit\tCtrl-X')
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
# View menu
menu_view = wx.Menu()
self.m_ontop = menu_view.Append(-1, '&Stay on top', kind = wx.ITEM_CHECK)
self.m_ontop.Check(True)
self.Bind(wx.EVT_MENU, self.on_ontop, self.m_ontop)
# Help menu
menu_help = wx.Menu()
m_about = menu_help.Append(-1, '&About...')
self.Bind(wx.EVT_MENU, self.on_about, m_about)
# construction of menu bar
self.menubar.Append(menu_file, '&File')
self.menubar.Append(menu_view, '&View')
self.menubar.Append(menu_help, '&Help')
self.SetMenuBar(self.menubar)
def create_status_bar(self):
""" construction of status bar """
self.statusbar = self.CreateStatusBar()
self.statusbar.SetFieldsCount(2)
self.statusbar.SetStatusWidths([50, -1])
def create_main_panel(self):
""" construction of the main controls """
self.panel = wx.Panel(self)
    # construct plotting area
self.fig = Figure(FIGSIZE, dpi = DPI)
# construct axes
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor(BGCOLOR)
# adjust font size of axes labels
pylab.setp(self.axes.get_xticklabels(), fontsize = FONTSIZE)
pylab.setp(self.axes.get_yticklabels(), fontsize = FONTSIZE)
# construct canvas with plotting area
self.plot_data = self.axes.plot([], linewidth = LINEWIDTH, color = LINECOLOR)[0]
self.canvas = FigCanvas(self.panel, -1, self.fig)
# construct location choice box
self.location_txt = wx.StaticText(self.panel, -1, label = ' Select location:')
self.location_box = wx.Choice(self.panel, -1, choices = [], size = (150,-1))
self.location_box.Enable(False)
self.Bind(wx.EVT_CHOICE, lambda event: self.draw_plot(), self.location_box)
# layout location choice box
self.hbox0 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox0.Add(self.location_txt, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox0.Add(self.location_box, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
# construct buttons
self.pause_button = wx.Button(self.panel, -1, 'Pause')
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.clear_button = wx.Button(self.panel, -1, 'Clear')
self.Bind(wx.EVT_BUTTON, self.on_clear_button, self.clear_button)
self.cb_grid = wx.CheckBox(self.panel, -1, 'Show grid', style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, lambda event: self.draw_plot(), self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1, 'X-labels', style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, lambda event: self.draw_plot(), self.cb_xlab)
self.cb_xlab.SetValue(True)
# layout buttons (add space using self.hbox1.AddSpacer(5))
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.Add(self.clear_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
# construct axis controls
self.xrange_control = XAxisRangeBox(self.panel, -1)
self.yrange_control = YAxisRangeBox(self.panel, -1)
# layout axis controls
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xrange_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.yrange_control, border=5, flag=wx.ALL)
# finally, create layout of viewer frame
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox0, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def draw_plot(self):
""" redraw the plot and update the gui if necessary """
if not self.paused:
# check if data is available
if len(self.locations) == 0:
self.locations = sorted(self.thread.locations())
if len(self.locations) > 0:
self.location_box.AppendItems(self.locations)
self.location_box.SetSelection(0)
self.location_box.Enable(True)
self.flash_status_message('Connection established')
if len(self.locations) > 0:
# fetch data at selected receiver location
loc = self.locations[self.location_box.GetSelection()]
times, levels = self.thread.levels(loc)
if (len(times) == len(levels)):
# calculate x-axis limits
if self.xrange_control.is_full():
# show the full range for the x-axis
xmin = times[0]
xmax = max(times[0] + self.xrange_control.minvalue, times[-1])
else:
# show a sliding window
xmax = times[-1]
xmin = xmax - self.xrange_control.value
# calculate y-axis limits
if self.yrange_control.is_auto():
# find the min and max values of the data and add a minimal margin
ymin = round(min(levels), 0) - MARGIN
ymax = round(max(levels), 0) + MARGIN
else:
# use manual interval
ymin = self.yrange_control.value[0]
ymax = ymin + self.yrange_control.value[1]
# set axis limits
self.axes.set_xbound(lower = xmin, upper = xmax)
self.axes.set_ybound(lower = ymin, upper = ymax)
# finally, plot the data and redraw the plot
self.plot_data.set_xdata(numpy.array(times))
self.plot_data.set_ydata(numpy.array(levels))
# draw grid
if self.cb_grid.IsChecked():
self.axes.grid(True, color = GRIDCOLOR)
else:
self.axes.grid(False)
# draw axis labels
pylab.setp(self.axes.get_xticklabels(), visible = self.cb_xlab.IsChecked())
self.canvas.draw()
def clear_plot(self):
""" clear the data on the plot """
self.plot_data.set_xdata([])
self.plot_data.set_ydata([])
self.canvas.draw()
def on_redraw_timer(self, event):
""" redraw the plot """
self.draw_plot()
def on_pause_button(self, event):
""" called when the pause button is clicked """
self.paused = not self.paused
if self.paused:
self.statusbar.SetStatusText('Paused', 0)
else:
self.statusbar.SetStatusText('', 0)
def on_update_pause_button(self, event):
""" called when the pause button is to be updated """
label = 'Resume' if self.paused else 'Pause'
self.pause_button.SetLabel(label)
def on_clear_button(self, event):
""" called when the clear butten is clicked """
self.thread.clear()
self.clear_choices()
self.clear_plot()
def clear_choices(self):
""" clear the choices box """
self.locations = []
self.location_box.Clear()
self.location_box.Enable(False)
self.flash_status_message('Cleared')
def on_save_plot(self, event):
""" show a window for saving a screenshot """
dlg = wx.FileDialog(self, message = 'Save plot as...', defaultDir = os.getcwd(), defaultFile = 'plot.png', wildcard = 'PNG (*.png)|*.png', style = wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi = DPI)
self.flash_status_message('Saved to %s' % path)
def stop_thread(self):
""" stop the level thread """
self.thread.active = False
self.thread.join()
def on_exit(self, event):
""" called when the viewer is closed """
self.stop_thread()
self.Destroy()
def on_ontop(self, event):
""" toggles the stay on top modus """
if self.m_ontop.IsChecked():
self.SetWindowStyle(self.wstyle | wx.STAY_ON_TOP)
else:
self.SetWindowStyle(self.wstyle)
def on_about(self, event):
""" show an about box """
wx.MessageBox(ABOUT, 'About ' + NAME)
def flash_status_message(self, message):
""" flash a message on the status bar """
try:
self.statusbar.SetStatusText(message, 1)
self.timeroff = wx.Timer(self)
self.Bind(wx.EVT_TIMER, lambda event: self.statusbar.SetStatusText('', 1), self.timeroff)
self.timeroff.Start(FLASHTIME, oneShot = True)
except:
pass
#---------------------------------------------------------------------------------------------------
# Test code
#---------------------------------------------------------------------------------------------------
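# Usage sketch for the modes below (the script/module name is assumed here):
#   python viewer.py          -> start the GUI viewer
#   python viewer.py test     -> GUI viewer fed with internally generated random levels
#   python viewer.py command  -> console-only receiver that prints incoming levels
#   python viewer.py dummy    -> dummy Aimsun/Noysim2 client sending random levels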
if __name__ == '__main__':
if len(sys.argv) <= 1:
# no command line argument, so run the viewer application
app = wx.PySimpleApp()
app.frame = ViewerFrame()
app.frame.Show()
app.MainLoop()
if (len(sys.argv) == 2) and (sys.argv[1] == 'test'):
# run the viewer in test mode, i.e. generating its own levels for display
app = wx.PySimpleApp()
app.frame = ViewerFrame(test = True)
app.frame.Show()
app.MainLoop()
if (len(sys.argv) == 2) and (sys.argv[1] == 'command'):
# run the viewer in command line mode, i.e. only receiving levels and printing them to the console
print 'Running viewer in command line mode - press any key to stop...'
thread = createViewerLevelThread(frame = None, verbose = True)
thread.start()
# wait until a key is pressed
stop = False
while not stop:
if msvcrt.kbhit():
c = msvcrt.getch()
stop = True
time.sleep(0.1)
# stop the thread
thread.active = False
thread.join()
if (len(sys.argv) == 2) and (sys.argv[1] == 'dummy'):
# run a dummy Aimsun/Noysim2 client that sends random levels (for use with viewer in normal or command line mode)
print 'Running dummy Aimsun/Noysim2 client - press any key to stop...'
client = createLevelBuffer(verbose = True, sleep = 1000*TESTSLEEP)
client.sendClear()
stop = False
(t, dt) = (0.0, TESTDT)
while not stop:
t += dt
client.sendLevels(t = t, levels = dict([(loc, randomLevel()) for loc in TESTLOCS]))
if msvcrt.kbhit():
c = msvcrt.getch()
stop = True
| mit |
craigcitro/pydatalab | tests/bigquery/schema_tests.py | 6 | 4284 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import collections
import pandas
import sys
import unittest
import google.datalab.bigquery
import google.datalab.utils
class TestCases(unittest.TestCase):
def test_schema_from_dataframe(self):
df = TestCases._create_data_frame()
result = google.datalab.bigquery.Schema.from_data(df)
self.assertEqual(google.datalab.bigquery.Schema.from_data(TestCases._create_inferred_schema()),
result)
def test_schema_from_data(self):
variant1 = [
3,
2.0,
True,
['cow', 'horse', [0, []]]
]
variant2 = collections.OrderedDict()
variant2['Column1'] = 3
variant2['Column2'] = 2.0
variant2['Column3'] = True
variant2['Column4'] = collections.OrderedDict()
variant2['Column4']['Column1'] = 'cow'
variant2['Column4']['Column2'] = 'horse'
variant2['Column4']['Column3'] = collections.OrderedDict()
variant2['Column4']['Column3']['Column1'] = 0
variant2['Column4']['Column3']['Column2'] = collections.OrderedDict()
master = [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'FLOAT'},
{'name': 'Column3', 'type': 'BOOLEAN'},
{'name': 'Column4', 'type': 'RECORD', 'fields': [
{'name': 'Column1', 'type': 'STRING'},
{'name': 'Column2', 'type': 'STRING'},
{'name': 'Column3', 'type': 'RECORD', 'fields': [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'RECORD', 'fields': []}
]}
]}
]
schema_master = google.datalab.bigquery.Schema(master)
with self.assertRaises(Exception) as error1:
google.datalab.bigquery.Schema.from_data(variant1)
if sys.version_info[0] == 3:
self.assertEquals('Cannot create a schema from heterogeneous list [3, 2.0, True, ' +
'[\'cow\', \'horse\', [0, []]]]; perhaps you meant to use ' +
'Schema.from_record?', str(error1.exception))
else:
self.assertEquals('Cannot create a schema from heterogeneous list [3, 2.0, True, ' +
'[u\'cow\', u\'horse\', [0, []]]]; perhaps you meant to use ' +
'Schema.from_record?', str(error1.exception))
schema3 = google.datalab.bigquery.Schema.from_data([variant1])
schema4 = google.datalab.bigquery.Schema.from_data([variant2])
schema5 = google.datalab.bigquery.Schema.from_data(master)
schema6 = google.datalab.bigquery.Schema.from_record(variant1)
schema7 = google.datalab.bigquery.Schema.from_record(variant2)
self.assertEquals(schema_master, schema3, 'schema inferred from list of lists with from_data')
self.assertEquals(schema_master, schema4, 'schema inferred from list of dicts with from_data')
self.assertEquals(schema_master, schema5, 'schema inferred from BQ schema list with from_data')
self.assertEquals(schema_master, schema6, 'schema inferred from list with from_record')
self.assertEquals(schema_master, schema7, 'schema inferred from dict with from_record')
@staticmethod
def _create_data_frame():
data = {
'some': [
0, 1, 2, 3
],
'column': [
'r0', 'r1', 'r2', 'r3'
],
'headers': [
10.0, 10.0, 10.0, 10.0
]
}
return pandas.DataFrame(data)
@staticmethod
def _create_inferred_schema(extra_field=None):
schema = [
{'name': 'some', 'type': 'INTEGER'},
{'name': 'column', 'type': 'STRING'},
{'name': 'headers', 'type': 'FLOAT'},
]
if extra_field:
schema.append({'name': extra_field, 'type': 'INTEGER'})
return schema
| apache-2.0 |
claesenm/optunity-benchmark | optimizers/tpe/hyperopt_august2013_mod_src/hyperopt/mongoexp.py | 2 | 60046 | """
Mongo-based Experiment driver and worker client
===============================================
Components involved:
- mongo
e.g. mongod ...
- driver
e.g. hyperopt-mongo-search mongo://address bandit_json bandit_algo_json
- worker
e.g. hyperopt-mongo-worker --loop mongo://address
Mongo
=====
Mongo (daemon process mongod) is used for IPC between the driver and worker.
Configure it as you like, so that hyperopt-mongo-search can communicate with it.
I think there is some support in this file for an ssh+mongo connection type.
The experiment uses the following collections for IPC:
* jobs - documents of a standard form used to store suggested trials and their
results. These documents have keys:
* spec : subdocument returned by bandit_algo.suggest
* exp_key: an identifier of which driver suggested this trial
* cmd: a tuple (protocol, ...) identifying bandit.evaluate
* state: 0, 1, 2, 3 for job state (new, running, ok, fail)
* owner: None for new jobs, (hostname, pid) for started jobs
* book_time: time a job was reserved
* refresh_time: last time the process running the job checked in
* result: the subdocument returned by bandit.evaluate
* error: for jobs of state 3, a reason for failure.
* logs: a dict of sequences of strings received by ctrl object
* info: info messages
* warn: warning messages
* error: error messages
* fs - a gridfs storage collection (used for pickling)
* drivers - documents describing drivers. These are used to prevent two drivers
from using the same exp_key simultaneously, and to attach saved states.
* exp_key
* workdir: [optional] path where workers should chdir to
Attachments:
* pkl: [optional] saved state of experiment class
* bandit_args_kwargs: [optional] pickled (clsname, args, kwargs) to
reconstruct bandit in worker processes
The MongoJobs, MongoExperiment, and CtrlObj classes as well as the main_worker
method form the abstraction barrier around this database layout.
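For illustration only (field values below are hypothetical and this sketch is not
part of the protocol definition), a newly suggested trial could be stored as:

    {'exp_key': 'my_experiment',
     'cmd': ('bandit_json evaluate', 'my_package.MyBandit'),
     'state': 0,
     'owner': None,
     'spec': {...},
     'result': {},
     'book_time': None,
     'refresh_time': None}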
Driver
======
A driver directs an experiment, by calling a bandit_algo to suggest trial
points, and queuing them in mongo so that a worker can evaluate that trial
point.
The hyperopt-mongo-search script creates a single MongoExperiment instance, and
calls its run() method.
Saving and Resuming
-------------------
The command
"hyperopt-mongo-search bandit algo"
creates a new experiment or resumes an existing experiment.
The command
"hyperopt-mongo-search --exp-key=<EXPKEY>"
can only resume an existing experiment.
The command
"hyperopt-mongo-search --clear-existing bandit algo"
can only create a new experiment, and potentially deletes an existing one.
The command
"hyperopt-mongo-search --clear-existing --exp-key=EXPKEY bandit algo"
can only create a new experiment, and potentially deletes an existing one.
By default, MongoExperiment.run will try to save itself before returning. It
does so by pickling itself to a file called 'exp_key' in the fs collection.
Resuming means unpickling that file and calling run again.
The MongoExperiment instance itself is minimal (a key, a bandit, a bandit algo,
a workdir, a poll interval). The only stateful element is the bandit algo. The
difference between resume and start is in the handling of the bandit algo.
Worker
======
A worker looks up a job in a mongo database, maps that job document to a
runnable python object, calls that object, and writes the return value back to
the database.
A worker *reserves* a job by atomically identifying a document in the jobs
collection whose owner is None and whose state is 0, and setting the state to
1. If it fails to identify such a job, it loops with a random sleep interval
of a few seconds and polls the database.
If hyperopt-mongo-worker is called with a --loop argument then it goes back to
the database after finishing a job to identify and perform another one.
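Schematically, the reservation step amounts to a single atomic find_and_modify of
roughly the following form (a simplified sketch of what MongoJobs.reserve issues,
not an exact transcript):

    jobs.find_and_modify(
        {'owner': None, 'state': 0, 'exp_key': exp_key},
        {'$set': {'owner': (hostname, pid), 'state': 1,
                  'book_time': now, 'refresh_time': now}},
        new=True, upsert=False)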
CtrlObj
-------
The worker allocates a CtrlObj and passes it to bandit.evaluate in addition to
the subdocument found at job['spec']. A bandit can use ctrl.info, ctrl.warn,
ctrl.error and so on like logger methods, and those messages will be written
to the mongo database (to job['logs']). They are not written synchronously
though, they are written when the bandit.evaluate function calls
ctrl.checkpoint().
Ctrl.checkpoint does several things:
* flushes logging messages to the database
* updates the refresh_time
* optionally updates the result subdocument
The main_worker routine calls Ctrl.checkpoint(rval) once after the
bandit.evaluate function has returned before setting the state to 2 or 3 to
finalize the job in the database.
"""
__authors__ = ["James Bergstra", "Dan Yamins"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
import copy
try:
import dill as cPickle
except ImportError:
import cPickle
import hashlib
import logging
import optparse
import os
import shutil
import signal
import socket
import subprocess
import sys
import time
import urlparse
import warnings
import numpy
import pymongo
import gridfs
from bson import SON
logger = logging.getLogger(__name__)
from .base import JOB_STATES
from .base import (JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE,
JOB_STATE_ERROR)
from .base import Experiment
from .base import Trials
from .base import trials_from_docs
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
import plotting
class OperationFailure(Exception):
"""Proxy that could be factored out if we also want to use CouchDB and
JobmanDB classes with this interface
"""
class Shutdown(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class WaitQuit(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class InvalidMongoTrial(InvalidTrial):
pass
class BanditSwapError(Exception):
"""Raised when the search program tries to change the bandit attached to
an experiment.
"""
class ReserveTimeout(Exception):
"""No job was reserved in the alotted time
"""
def read_pw():
username = 'hyperopt'
password = open(os.path.join(os.getenv('HOME'), ".hyperopt")).read()[:-1]
return dict(
username=username,
password=password)
def authenticate_for_db(db):
d = read_pw()
db.authenticate(d['username'], d['password'])
def parse_url(url, pwfile=None):
"""Unpacks a url of the form
protocol://[username[:pw]]@hostname[:port]/db/collection
:rtype: tuple of strings
:returns: protocol, username, password, hostname, port, dbname, collection
:note:
If the password is not given in the url but the username is, then
this function will read the password from file by calling
``open(pwfile).read()[:-1]``
"""
protocol=url[:url.find(':')]
ftp_url='ftp'+url[url.find(':'):]
# -- parse the string as if it were an ftp address
tmp = urlparse.urlparse(ftp_url)
logger.info( 'PROTOCOL %s'% protocol)
logger.info( 'USERNAME %s'% tmp.username)
logger.info( 'HOSTNAME %s'% tmp.hostname)
logger.info( 'PORT %s'% tmp.port)
logger.info( 'PATH %s'% tmp.path)
try:
_, dbname, collection = tmp.path.split('/')
except:
print >> sys.stderr, "Failed to parse '%s'"%(str(tmp.path))
raise
logger.info( 'DB %s'% dbname)
logger.info( 'COLLECTION %s'% collection)
if tmp.password is None:
if (tmp.username is not None) and pwfile:
password = open(pwfile).read()[:-1]
else:
password = None
else:
password = tmp.password
logger.info( 'PASS %s'% password)
return (protocol, tmp.username, password, tmp.hostname, tmp.port, dbname,
collection)
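# Illustrative example of parse_url (hypothetical address, no password file):
#   parse_url('mongo://hyperopt@localhost:27017/foo_db/jobs')
#   -> ('mongo', 'hyperopt', None, 'localhost', 27017, 'foo_db', 'jobs')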
def connection_with_tunnel(host='localhost',
auth_dbname='admin', port=27017,
ssh=False, user='hyperopt', pw=None):
if ssh:
local_port=numpy.random.randint(low=27500, high=28000)
# -- forward from local to remote machine
ssh_tunnel = subprocess.Popen(
['ssh', '-NTf', '-L',
'%i:%s:%i'%(local_port, '127.0.0.1', port),
host],
#stdin=subprocess.PIPE,
#stdout=subprocess.PIPE,
#stderr=subprocess.PIPE,
)
# -- give the subprocess time to set up
time.sleep(.5)
connection = pymongo.Connection('127.0.0.1', local_port,
document_class=SON)
else:
connection = pymongo.Connection(host, port, document_class=SON)
if user:
if user == 'hyperopt':
authenticate_for_db(connection[auth_dbname])
else:
raise NotImplementedError()
ssh_tunnel=None
return connection, ssh_tunnel
def connection_from_string(s):
protocol, user, pw, host, port, db, collection = parse_url(s)
if protocol == 'mongo':
ssh=False
elif protocol in ('mongo+ssh', 'ssh+mongo'):
ssh=True
else:
raise ValueError('unrecognized protocol for MongoJobs', protocol)
connection, tunnel = connection_with_tunnel(
ssh=ssh,
user=user,
pw=pw,
host=host,
port=port,
)
return connection, tunnel, connection[db], connection[db][collection]
class MongoJobs(object):
"""
# Interface to a Jobs database structured like this
#
# Collections:
#
# db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
# 'refresh_time', 'state', 'exp_key', 'owner', 'result'}
# This is the collection that the worker nodes write to
#
# db.gfs - file storage via gridFS for all collections
#
"""
def __init__(self, db, jobs, gfs, conn, tunnel, config_name):
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn=conn
self.tunnel=tunnel
self.config_name = config_name
# TODO: rename jobs -> coll throughout
coll = property(lambda s : s.jobs)
@classmethod
def alloc(cls, dbname, host='localhost',
auth_dbname='admin', port=27017,
jobs_coll='jobs', gfs_coll='fs', ssh=False, user=None, pw=None):
connection, tunnel = connection_with_tunnel(
host, auth_dbname, port, ssh, user, pw)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, db[jobs_coll], gfs, connection, tunnel)
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll='fs', config_name='spec'):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ['exp_key', 'result.loss', 'book_time']:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index('exp_key', unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(spec=dict(state=JOB_STATE_RUNNING)))
#TODO: mark some as MIA
rval = [r for r in rval if not r.get('MIA', False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(spec=dict(state=JOB_STATE_RUNNING)))
#TODO: mark some as MIA
rval = [r for r in rval if r.get('MIA', False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job, safe=True):
"""Return a job dictionary by inserting the job dict into the database"""
try:
cpy = copy.deepcopy(job)
# this call adds an _id field to cpy
_id = self.jobs.insert(cpy, safe=safe, check_keys=True)
# so now we return the dict with the _id field
assert _id == cpy['_id']
return cpy
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete(self, job, safe=True):
"""Delete job[s]"""
try:
self.jobs.remove(job, safe=safe)
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete_all(self, cond={}, safe=True):
"""Delete all jobs and attachments"""
try:
for d in self.jobs.find(spec=cond, fields=['_id', '_attachments']):
logger.info('deleting job %s' % d['_id'])
for name, file_id in d.get('_attachments', []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error('failed to remove attachment %s:%s' % (
name, file_id))
self.jobs.remove(d, safe=safe)
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete_all_error_jobs(self, safe=True):
return self.delete_all(cond={'state': JOB_STATE_ERROR}, safe=safe)
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(cond) #copy is important, will be modified, but only the top-level
if exp_key is not None:
cond['exp_key'] = exp_key
#having an owner of None implies state==JOB_STATE_NEW, so this effectively
#acts as a filter to make sure that only new jobs get reserved.
if cond.get('owner') is not None:
raise ValueError('refusing to reserve owned job')
else:
cond['owner'] = None
cond['state'] = JOB_STATE_NEW #theoretically this is redundant, theoretically
try:
rval = self.jobs.find_and_modify(
cond,
{'$set':
{'owner': host_id,
'book_time': now,
'state': JOB_STATE_RUNNING,
'refresh_time': now,
}
},
new=True,
safe=True,
upsert=False)
except pymongo.errors.OperationFailure, e:
logger.error('Error during reserve_job: %s'%str(e))
rval = None
return rval
def refresh(self, doc, safe=False):
self.update(doc, dict(refresh_time=coarse_utcnow()), safe=False)
def update(self, doc, dct, safe=True, collection=None):
"""Return union of doc and dct, after making sure that dct has been
added to doc in `collection`.
This function does not modify either `doc` or `dct`.
safe=True means error-checking is done. safe=False means this function will succeed
regardless of what happens with the db.
"""
if collection is None:
collection = self.coll
dct = copy.deepcopy(dct)
if '_id' not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if '_id' in dct:
if dct['_id'] != doc['_id']:
raise ValueError('cannot update the _id field')
del dct['_id']
if 'version' in dct:
if dct['version'] != doc['version']:
warnings.warn('Ignoring "version" field in update dictionary')
if 'version' in doc:
doc_query = dict(_id=doc['_id'], version=doc['version'])
dct['version'] = doc['version']+1
else:
doc_query = dict(_id=doc['_id'])
dct['version'] = 1
try:
# warning - if doc matches nothing then this function succeeds
# N.B. this matches *at most* one entry, and possibly zero
collection.update(
doc_query,
{'$set': dct},
safe=True,
upsert=False,
multi=False,)
except pymongo.errors.OperationFailure, e:
# translate pymongo failure into generic failure
raise OperationFailure(e)
# update doc in-place to match what happened on the server side
doc.update(dct)
if safe:
server_doc = collection.find_one(
dict(_id=doc['_id'], version=doc['version']))
if server_doc is None:
raise OperationFailure('updated doc not found : %s'
% str(doc))
elif server_doc != doc:
if 0:# This is all commented out because it is tripping on the fact that
# str('a') != unicode('a').
# TODO: eliminate false alarms and catch real ones
mismatching_keys = []
for k, v in server_doc.items():
if k in doc:
if doc[k] != v:
mismatching_keys.append((k, v, doc[k]))
else:
mismatching_keys.append((k, v, '<missing>'))
for k,v in doc.items():
if k not in server_doc:
mismatching_keys.append((k, '<missing>', v))
raise OperationFailure('local and server doc documents are out of sync: %s'%
repr((doc, server_doc, mismatching_keys)))
return doc
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], basestring), name_id
return str(name_id[0])
return map(as_str, doc.get('_attachments', []))
def set_attachment(self, doc, blob, name, collection=None):
"""Attach potentially large data string `blob` to `doc` by name `name`
blob must be a string
doc must have been saved in some collection (must have an _id), but not
necessarily the jobs collection.
name must be a string
Returns None
"""
# If there is already a file with the given name for this doc, then we will delete it
# after writing the new file
attachments = doc.get('_attachments', [])
name_matches = [a for a in attachments if a[0] == name]
# the filename is set to something so that fs.list() will display the file
new_file_id = self.gfs.put(blob, filename='%s_%s' % (doc['_id'], name))
logger.info('stored blob of %i bytes with id=%s and filename %s_%s' % (
len(blob), str(new_file_id), doc['_id'], name))
new_attachments = ([a for a in attachments if a[0] != name]
+ [(name, new_file_id)])
try:
ii = 0
doc = self.update(doc, {'_attachments': new_attachments},
collection=collection)
# there is a database leak until we actually delete the files that
# are no longer pointed to by new_attachments
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning("Leak during set_attachment: old_file_id=%s" % (
name_matches[ii][1]))
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
#return new_file_id
def get_attachment(self, doc, name):
"""Retrieve data attached to `doc` by `attach_blob`.
Raises OperationFailure if `name` does not correspond to an attached blob.
Returns the blob as a string.
"""
attachments = doc.get('_attachments', [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure('Attachment not found: %s' % name)
if len(file_ids) > 1:
raise OperationFailure('multiple name matches', (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get('_attachments', [])
file_id = None
for i,a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure('Attachment not found: %s' % name)
#print "Deleting", file_id
del attachments[i]
self.update(doc, {'_attachments':attachments}, collection=collection)
self.gfs.delete(file_id)
class MongoTrials(Trials):
"""Trials maps on to an entire mongo collection. It's basically a wrapper
around MongoJobs for now.
As a concession to performance, this object permits trial filtering based
on the exp_key, but I feel that's a hack. The case of `cmd` is similar--
the exp_key and cmd are semantically coupled.
WRITING TO THE DATABASE
-----------------------
The trials object is meant for *reading* a trials database. Writing
to a database is different enough from writing to an in-memory
collection that no attempt has been made to abstract away that
difference. If you want to update the documents within
a MongoTrials collection, then retrieve the `.handle` attribute (a
MongoJobs instance) and use lower-level methods, or pymongo's
interface directly. When you are done writing, call refresh() or
refresh_tids() to bring the MongoTrials up to date.
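
    Example (sketch; the connection string and exp_key here are hypothetical):

        trials = MongoTrials('mongo://localhost:27017/foo_db/jobs',
                             exp_key='my_experiment')
        trials.refresh()   # pull the current job documents from mongo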
"""
async = True
def __init__(self, arg, exp_key=None, cmd=None, workdir=None,
refresh=True):
if isinstance(arg, MongoJobs):
self.handle = arg
else:
connection_string = arg
self.handle = MongoJobs.new_from_connection_str(connection_string)
self.handle.create_indexes()
self._exp_key = exp_key
self.cmd = cmd
self.workdir = workdir
if refresh:
self.refresh()
def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
rval = self.__class__(self.handle,
exp_key=self._exp_key if exp_key is None else exp_key,
cmd=self.cmd if cmd is None else cmd,
workdir=self.workdir if workdir is None else workdir,
refresh=refresh)
return rval
def refresh_tids(self, tids):
""" Sync documents with `['tid']` in the list of `tids` from the
database (not *to* the database).
Local trial documents whose tid is not in `tids` are not
affected by this call. Local trial documents whose tid is in `tids` may
be:
* *deleted* (if db no longer has corresponding document), or
* *updated* (if db has an updated document) or,
* *left alone* (if db document matches local one).
Additionally, if the db has a matching document, but there is no
local trial with a matching tid, then the db document will be
*inserted* into the local collection.
"""
exp_key = self._exp_key
if exp_key != None:
query = {'exp_key' : exp_key}
else:
query = {}
t0 = time.time()
query['state'] = {'$ne': JOB_STATE_ERROR}
if tids is not None:
query['tid'] = {'$in': list(tids)}
orig_trials = getattr(self, '_trials', [])
_trials = orig_trials[:] #copy to make sure it doesn't get screwed up
if _trials:
db_data = list(self.handle.jobs.find(query,
fields=['_id', 'version']))
# -- pull down a fresh list of ids from mongo
if db_data:
#make numpy data arrays
db_data = numpy.rec.array([(x['_id'], int(x['version']))
for x in db_data],
names=['_id', 'version'])
db_data.sort(order=['_id', 'version'])
db_data = db_data[get_most_recent_inds(db_data)]
existing_data = numpy.rec.array([(x['_id'],
int(x['version'])) for x in _trials],
names=['_id', 'version'])
existing_data.sort(order=['_id', 'version'])
#which records are in db but not in existing, and vice versa
db_in_existing = fast_isin(db_data['_id'], existing_data['_id'])
existing_in_db = fast_isin(existing_data['_id'], db_data['_id'])
#filtering out out-of-date records
_trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
#new data is what's in db that's not in existing
new_data = db_data[numpy.invert(db_in_existing)]
                #having removed the new and the out-of-date data,
                #concentrating on records present in both db and existing for state changes
db_data = db_data[db_in_existing]
existing_data = existing_data[existing_in_db]
try:
assert len(db_data) == len(existing_data)
assert (existing_data['_id'] == db_data['_id']).all()
assert (existing_data['version'] <= db_data['version']).all()
except:
reportpath = os.path.join(os.getcwd(),
'hyperopt_refresh_crash_report_' + \
str(numpy.random.randint(1e8)) + '.pkl')
logger.error('HYPEROPT REFRESH ERROR: writing error file to %s' % reportpath)
_file = open(reportpath, 'w')
cPickle.dump({'db_data': db_data,
'existing_data': existing_data},
_file)
_file.close()
raise
same_version = existing_data['version'] == db_data['version']
_trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
version_changes = existing_data[numpy.invert(same_version)]
#actually get the updated records
update_ids = new_data['_id'].tolist() + version_changes['_id'].tolist()
num_new = len(update_ids)
update_query = copy.deepcopy(query)
update_query['_id'] = {'$in': update_ids}
updated_trials = list(self.handle.jobs.find(update_query))
_trials.extend(updated_trials)
else:
num_new = 0
_trials = []
else:
#this case is for performance, though should be able to be removed
#without breaking correctness.
_trials = list(self.handle.jobs.find(query))
if _trials:
_trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
num_new = len(_trials)
logger.debug('Refresh data download took %f seconds for %d ids' %
(time.time() - t0, num_new))
if tids is not None:
# -- If tids were given, then _trials only contains
# documents with matching tids. Here we augment these
# fresh matching documents, with our current ones whose
# tids don't match.
new_trials = _trials
tids_set = set(tids)
assert all(t['tid'] in tids_set for t in new_trials)
old_trials = [t for t in orig_trials if t['tid'] not in tids_set]
_trials = new_trials + old_trials
# -- reassign new trials to self, in order of increasing tid
jarray = numpy.array([j['_id'] for j in _trials])
jobsort = jarray.argsort()
self._trials = [_trials[_idx] for _idx in jobsort]
self._specs = [_trials[_idx]['spec'] for _idx in jobsort]
self._results = [_trials[_idx]['result'] for _idx in jobsort]
self._miscs = [_trials[_idx]['misc'] for _idx in jobsort]
def refresh(self):
self.refresh_tids(None)
def _insert_trial_docs(self, docs):
rval = []
for doc in docs:
rval.append(self.handle.jobs.insert(doc, safe=True))
return rval
def count_by_state_unsynced(self, arg):
exp_key = self._exp_key
# TODO: consider searching by SON rather than dict
if isinstance(arg, int):
if arg not in JOB_STATES:
raise ValueError('invalid state', arg)
query = dict(state=arg)
else:
assert hasattr(arg, '__iter__')
states = list(arg)
assert all([x in JOB_STATES for x in states])
query = dict(state={'$in': states})
if exp_key != None:
query['exp_key'] = exp_key
rval = self.handle.jobs.find(query).count()
return rval
def delete_all(self, cond=None):
if cond is None:
cond = {}
else:
cond = dict(cond)
if self._exp_key:
cond['exp_key'] = self._exp_key
# -- remove all documents matching condition
self.handle.delete_all(cond)
gfs = self.handle.gfs
for filename in gfs.list():
try:
fdoc = gfs.get_last_version(filename=filename, **cond)
except gridfs.errors.NoFile:
continue
gfs.delete(fdoc._id)
self.refresh()
def new_trial_ids(self, N):
db = self.handle.db
# N.B. that the exp key is *not* used here. It was once, but it caused
# a nasty bug: tids were generated by a global experiment
# with exp_key=None, running a BanditAlgo that introduced sub-experiments
# with exp_keys, which ran jobs that did result injection. The tids of
# injected jobs were sometimes unique within an experiment, and
# sometimes not. Hilarious!
#
# Solution: tids are generated to be unique across the db, not just
# within an exp_key.
#
# -- mongo docs say you can't upsert an empty document
query = {'a': 0}
doc = None
while doc is None:
doc = db.job_ids.find_and_modify(
query,
{'$inc' : {'last_id': N}},
upsert=True,
safe=True)
if doc is None:
logger.warning('no last_id found, re-trying')
time.sleep(1.0)
lid = doc.get('last_id', 0)
return range(lid, lid + N)
def trial_attachments(self, trial):
"""
Attachments to a single trial (e.g. learned weights)
Returns a dictionary interface to the attachments.
"""
# don't offer more here than in MongoCtrl
class Attachments(object):
def __contains__(_self, name):
return name in self.handle.attachment_names(doc=trial)
def __len__(_self):
return len(self.handle.attachment_names(doc=trial))
def __iter__(_self):
return iter(self.handle.attachment_names(doc=trial))
def __getitem__(_self, name):
try:
return self.handle.get_attachment(
doc=trial,
name=name)
except OperationFailure:
raise KeyError(name)
def __setitem__(_self, name, value):
self.handle.set_attachment(
doc=trial,
blob=value,
name=name,
collection=self.handle.db.jobs)
def __delitem__(_self, name):
raise NotImplementedError('delete trial_attachment')
def keys(self):
return [k for k in self]
def values(self):
return [self[k] for k in self]
def items(self):
return [(k, self[k]) for k in self]
return Attachments()
@property
def attachments(self):
"""
Attachments to a Trials set (such as bandit args).
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
gfs = self.handle.gfs
query = {}
if self._exp_key:
query['exp_key'] = self._exp_key
class Attachments(object):
def __iter__(_self):
if query:
# -- gfs.list does not accept query kwargs
# (at least, as of pymongo 2.4)
filenames = [fname
for fname in gfs.list()
if fname in _self]
else:
filenames = gfs.list()
return iter(filenames)
def __contains__(_self, name):
return gfs.exists(filename=name, **query)
def __getitem__(_self, name):
try:
rval = gfs.get_version(filename=name, **query).read()
return rval
except gridfs.NoFile:
raise KeyError(name)
def __setitem__(_self, name, value):
if gfs.exists(filename=name, **query):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
gfs.put(value, filename=name, **query)
def __delitem__(_self, name):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
return Attachments()
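# Sketch (added for illustration; host, db name and timeout are hypothetical) of
# wiring a worker up by hand with the classes defined in this module:
#   mj = MongoJobs.new_from_connection_str(as_mongo_str('localhost:27017/foo_db') + '/jobs')
#   MongoWorker(mj).run_one(reserve_timeout=120.0)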
class MongoWorker(object):
poll_interval = 3.0 # -- seconds
workdir = None
def __init__(self, mj,
poll_interval=poll_interval,
workdir=workdir,
exp_key=None,
logfilename='logfile.txt',
):
"""
mj - MongoJobs interface to jobs collection
poll_interval - seconds
workdir - string
exp_key - restrict reservations to this key
"""
self.mj = mj
self.poll_interval = poll_interval
self.workdir = workdir
self.exp_key = exp_key
self.logfilename = logfilename
def make_log_handler(self):
self.log_handler = logging.FileHandler(self.logfilename)
self.log_handler.setFormatter(
logging.Formatter(
fmt='%(levelname)s (%(name)s): %(message)s'))
self.log_handler.setLevel(logging.INFO)
def run_one(self,
host_id=None,
reserve_timeout=None,
erase_created_workdir=False,
):
if host_id == None:
host_id = '%s:%i'%(socket.gethostname(), os.getpid()),
job = None
start_time = time.time()
mj = self.mj
while job is None:
if (reserve_timeout
and (time.time() - start_time) > reserve_timeout):
raise ReserveTimeout()
job = mj.reserve(host_id, exp_key=self.exp_key)
if not job:
interval = (1 +
numpy.random.rand()
* (float(self.poll_interval) - 1.0))
logger.info('no job found, sleeping for %.1fs' % interval)
time.sleep(interval)
logger.debug('job found: %s' % str(job))
# -- don't let the cmd mess up our trial object
spec = spec_from_misc(job['misc'])
ctrl = MongoCtrl(
trials=MongoTrials(mj, exp_key=job['exp_key'], refresh=False),
read_only=False,
current_trial=job)
if self.workdir is None:
workdir = job['misc'].get('workdir', os.getcwd())
if workdir is None:
workdir = ''
workdir = os.path.join(workdir, str(job['_id']))
else:
workdir = self.workdir
workdir = os.path.abspath(os.path.expanduser(workdir))
cwd = os.getcwd()
sentinal = None
if not os.path.isdir(workdir):
# -- figure out the closest point to the workdir in the filesystem
closest_dir = ''
for wdi in os.path.split(workdir):
if os.path.isdir(os.path.join(closest_dir, wdi)):
closest_dir = os.path.join(closest_dir, wdi)
else:
break
assert closest_dir != workdir
# -- touch a sentinal file so that recursive directory
# removal stops at the right place
sentinal = os.path.join(closest_dir, wdi + '.inuse')
logger.debug("touching sentinal file: %s" % sentinal)
open(sentinal, 'w').close()
# -- now just make the rest of the folders
logger.debug("making workdir: %s" % workdir)
os.makedirs(workdir)
try:
root_logger = logging.getLogger()
if self.logfilename:
self.make_log_handler()
root_logger.addHandler(self.log_handler)
cmd = job['misc']['cmd']
cmd_protocol = cmd[0]
try:
if cmd_protocol == 'cpickled fn':
worker_fn = cPickle.loads(cmd[1])
elif cmd_protocol == 'call evaluate':
bandit = cPickle.loads(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'token_load':
cmd_toks = cmd[1].split('.')
cmd_module = '.'.join(cmd_toks[:-1])
worker_fn = exec_import(cmd_module, cmd[1])
elif cmd_protocol == 'bandit_json evaluate':
bandit = json_call(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'driver_attachment':
#name = 'driver_attachment_%s' % job['exp_key']
blob = ctrl.trials.attachments[cmd[1]]
bandit_name, bandit_args, bandit_kwargs = cPickle.loads(blob)
worker_fn = json_call(bandit_name,
args=bandit_args,
kwargs=bandit_kwargs).evaluate
elif cmd_protocol == 'domain_attachment':
blob = ctrl.trials.attachments[cmd[1]]
try:
domain = cPickle.loads(blob)
except BaseException, e:
logger.info('Error while unpickling. Try installing dill via "pip install dill" for enhanced pickling support.')
raise
worker_fn = domain.evaluate
else:
raise ValueError('Unrecognized cmd protocol', cmd_protocol)
result = worker_fn(spec, ctrl)
result = SONify(result)
except BaseException, e:
#XXX: save exception to database, but if this fails, then
# at least raise the original traceback properly
logger.info('job exception: %s' % str(e))
ctrl.checkpoint()
mj.update(job,
{'state': JOB_STATE_ERROR,
'error': (str(type(e)), str(e))},
safe=True)
raise
finally:
if self.logfilename:
root_logger.removeHandler(self.log_handler)
os.chdir(cwd)
logger.info('job finished: %s' % str(job['_id']))
attachments = result.pop('attachments', {})
for aname, aval in attachments.items():
logger.info(
'mongoexp: saving attachment name=%s (%i bytes)' % (
aname, len(aval)))
ctrl.attachments[aname] = aval
ctrl.checkpoint(result)
mj.update(job, {'state': JOB_STATE_DONE}, safe=True)
if sentinal:
if erase_created_workdir:
logger.debug('MongoWorker.run_one: rmtree %s' % workdir)
shutil.rmtree(workdir)
# -- put it back so that recursive removedirs works
os.mkdir(workdir)
# -- recursive backtrack to sentinal
logger.debug('MongoWorker.run_one: removedirs %s'
% workdir)
os.removedirs(workdir)
# -- remove sentinal
logger.debug('MongoWorker.run_one: rm %s' % sentinal)
os.remove(sentinal)
class MongoCtrl(Ctrl):
"""
Attributes:
current_trial - current job document
    trials - MongoTrials object in which current_trial resides
read_only - True means don't change the db
"""
def __init__(self, trials, current_trial, read_only):
self.trials = trials
self.current_trial = current_trial
self.read_only = read_only
def debug(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.warn(*args, **kwargs)
def error(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.error(*args, **kwargs)
def checkpoint(self, result=None):
if not self.read_only:
handle = self.trials.handle
handle.refresh(self.current_trial)
if result is not None:
return handle.update(self.current_trial, dict(result=result))
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
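    # Illustrative sketch (hypothetical attachment name): from inside an
    # objective function the mapping behaves like a dict backed by GridFS:
    #   ctrl.attachments['weights.pkl'] = cPickle.dumps(weights)  # store
    #   blob = ctrl.attachments['weights.pkl']                    # load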
@property
def set_attachment(self):
# XXX: Is there a better deprecation error?
raise RuntimeError(
'set_attachment deprecated. Use `self.attachments[name] = value`')
def exec_import(cmd_module, cmd):
worker_fn = None
exec('import %s; worker_fn = %s' % (cmd_module, cmd))
return worker_fn
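# Illustrative sketch of the 'token_load' protocol handled above (module and
# attribute names are hypothetical): given the dotted path 'mypkg.objective',
# exec_import imports 'mypkg' and returns the object named by the full path:
#   worker_fn = exec_import('mypkg', 'mypkg.objective')
#   result = worker_fn(spec, ctrl)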
def as_mongo_str(s):
if s.startswith('mongo://'):
return s
else:
return 'mongo://%s' % s
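# For example, as_mongo_str('localhost:27017/hyperopt') returns
# 'mongo://localhost:27017/hyperopt', while an already-prefixed string is
# passed through unchanged.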
def main_worker_helper(options, args):
N = int(options.max_jobs)
if options.last_job_timeout is not None:
last_job_timeout = time.time() + float(options.last_job_timeout)
else:
last_job_timeout = None
def sighandler_shutdown(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise Shutdown(signum)
def sighandler_wait_quit(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise WaitQuit(signum)
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGHUP, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
signal.signal(signal.SIGUSR1, sighandler_wait_quit)
if N > 1:
proc = None
cons_errs = 0
        while N and cons_errs < int(options.max_consecutive_failures):
            if last_job_timeout and time.time() > last_job_timeout:
                logger.info("Exiting due to last_job_timeout")
                return
try:
# recursive Popen, dropping N from the argv
# By using another process to run this job
# we protect ourselves from memory leaks, bad cleanup
# and other annoying details.
# The tradeoff is that a large dataset must be reloaded once for
# each subprocess.
sub_argv = [sys.argv[0],
'--poll-interval=%s' % options.poll_interval,
'--max-jobs=1',
'--mongo=%s' % options.mongo,
'--reserve-timeout=%s' % options.reserve_timeout]
if options.workdir is not None:
sub_argv.append('--workdir=%s' % options.workdir)
if options.exp_key is not None:
sub_argv.append('--exp-key=%s' % options.exp_key)
proc = subprocess.Popen(sub_argv)
retcode = proc.wait()
proc = None
except Shutdown:
#this is the normal way to stop the infinite loop (if originally N=-1)
if proc:
#proc.terminate() is only available as of 2.6
os.kill(proc.pid, signal.SIGTERM)
return proc.wait()
else:
return 0
except WaitQuit:
# -- sending SIGUSR1 to a looping process will cause it to
# break out of the loop after the current subprocess finishes
# normally.
if proc:
return proc.wait()
else:
return 0
if retcode != 0:
cons_errs += 1
else:
cons_errs = 0
N -= 1
logger.info("exiting with N=%i after %i consecutive exceptions" %(
N, cons_errs))
elif N == 1:
# XXX: the name of the jobs collection is a parameter elsewhere,
# so '/jobs' should not be hard-coded here
mj = MongoJobs.new_from_connection_str(
as_mongo_str(options.mongo) + '/jobs')
mworker = MongoWorker(mj,
float(options.poll_interval),
workdir=options.workdir,
exp_key=options.exp_key)
mworker.run_one(reserve_timeout=float(options.reserve_timeout))
else:
raise ValueError("N <= 0")
def main_worker():
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--exp-key",
dest='exp_key',
default = None,
metavar='str',
help="identifier for this workers's jobs")
parser.add_option("--last-job-timeout",
dest='last_job_timeout',
metavar='T',
default=None,
help="Do not reserve a job after T seconds have passed")
parser.add_option("--max-consecutive-failures",
dest="max_consecutive_failures",
metavar='N',
default=4,
help="stop if N consecutive jobs fail (default: 4)")
parser.add_option("--max-jobs",
dest='max_jobs',
default=sys.maxint,
help="stop after running this many jobs (default: inf)")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--poll-interval",
dest='poll_interval',
metavar='N',
default=5,
help="check work queue every 1 < T < N seconds (default: 5")
parser.add_option("--reserve-timeout",
dest='reserve_timeout',
metavar='T',
default=120.0,
help="poll database for up to T seconds to reserve a job")
parser.add_option("--workdir",
dest="workdir",
default=None,
help="root workdir (default: load from mongo)",
metavar="DIR")
(options, args) = parser.parse_args()
if args:
parser.print_help()
return -1
return main_worker_helper(options, args)
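# Illustrative command line (assuming this module is exposed as a worker
# script; the script name, mongo address and exp key are example values):
#   hyperopt-mongo-worker --mongo=localhost:27017/hyperopt \
#       --exp-key=my_exp --poll-interval=3 --max-jobs=1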
def bandit_from_options(options):
#
# Construct bandit
#
bandit_name = options.bandit
if options.bandit_argfile:
bandit_argfile_text = open(options.bandit_argfile).read()
bandit_argv, bandit_kwargs = cPickle.loads(bandit_argfile_text)
else:
bandit_argfile_text = ''
bandit_argv, bandit_kwargs = (), {}
bandit = json_call(bandit_name, bandit_argv, bandit_kwargs)
return (bandit,
(bandit_name, bandit_argv, bandit_kwargs),
bandit_argfile_text)
def algo_from_options(options, bandit):
#
# Construct algo
#
algo_name = options.bandit_algo
if options.bandit_algo_argfile:
# in theory this is easy just as above.
# need tests though, and it's just not done yet.
raise NotImplementedError('Option: --bandit-algo-argfile')
else:
algo_argfile_text = ''
algo_argv, algo_kwargs = (), {}
algo = json_call(algo_name, (bandit,) + algo_argv, algo_kwargs)
return (algo,
(algo_name, (bandit,) + algo_argv, algo_kwargs),
algo_argfile_text)
def expkey_from_options(options, bandit_stuff, algo_stuff):
#
# Determine exp_key
#
if None is options.exp_key:
# -- argfile texts
bandit_name = bandit_stuff[1][0]
algo_name = algo_stuff[1][0]
bandit_argfile_text = bandit_stuff[2]
algo_argfile_text = algo_stuff[2]
if bandit_argfile_text or algo_argfile_text:
m = hashlib.md5()
m.update(bandit_argfile_text)
m.update(algo_argfile_text)
exp_key = '%s/%s[arghash:%s]' % (
bandit_name, algo_name, m.hexdigest())
del m
else:
exp_key = '%s/%s' % (bandit_name, algo_name)
else:
exp_key = options.exp_key
return exp_key
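# Illustrative exp_key values produced above (bandit/algo names are
# hypothetical): without argfiles the key is 'my_bandit/my_algo'; with an
# argfile it becomes 'my_bandit/my_algo[arghash:<md5 of the argfile texts>]'.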
def main_search_helper(options, args, input=input, cmd_type=None):
"""
input is an argument so that unittest can replace stdin
cmd_type can be set to "D.A." to force interpretation of bandit as driver
attachment. This mechanism is used by unit tests.
"""
assert getattr(options, 'bandit', None) is None
assert getattr(options, 'bandit_algo', None) is None
assert len(args) == 2
options.bandit = args[0]
options.bandit_algo = args[1]
bandit_stuff = bandit_from_options(options)
bandit, bandit_NAK, bandit_argfile_text = bandit_stuff
bandit_name, bandit_args, bandit_kwargs = bandit_NAK
algo_stuff = algo_from_options(options, bandit)
algo, algo_NAK, algo_argfile_text = algo_stuff
algo_name, algo_args, algo_kwargs = algo_NAK
exp_key = expkey_from_options(options, bandit_stuff, algo_stuff)
trials = MongoTrials(as_mongo_str(options.mongo) + '/jobs', exp_key)
if options.clear_existing:
print >> sys.stdout, "Are you sure you want to delete",
print >> sys.stdout, ("all %i jobs with exp_key: '%s' ?"
% (
trials.handle.db.jobs.find({'exp_key':exp_key}).count(),
str(exp_key)))
print >> sys.stdout, '(y/n)'
y, n = 'y', 'n'
if input() != 'y':
print >> sys.stdout, "aborting"
return 1
trials.delete_all()
#
# Construct MongoExperiment
#
if bandit_argfile_text or algo_argfile_text or cmd_type=='D.A.':
aname = 'driver_attachment_%s.pkl' % exp_key
if aname in trials.attachments:
atup = cPickle.loads(trials.attachments[aname])
if bandit_NAK != atup:
raise BanditSwapError((bandit_NAK, atup))
else:
try:
blob = cPickle.dumps(bandit_NAK)
except BaseException, e:
print >> sys.stdout, "Error pickling. Try installing dill via 'pip install dill'."
raise e
trials.attachments[aname] = blob
worker_cmd = ('driver_attachment', aname)
else:
worker_cmd = ('bandit_json evaluate', bandit_name)
algo.cmd = worker_cmd
algo.workdir=options.workdir
self = Experiment(trials,
bandit_algo=algo,
poll_interval_secs=(int(options.poll_interval))
if options.poll_interval else 5,
max_queue_len=options.max_queue_len)
self.run(options.steps, block_until_done=options.block)
def main_search():
parser = optparse.OptionParser(
usage="%prog [options] [<bandit> <bandit_algo>]")
parser.add_option("--clear-existing",
action="store_true",
dest="clear_existing",
default=False,
help="clear all jobs with the given exp_key")
parser.add_option("--exp-key",
dest='exp_key',
default = None,
metavar='str',
help="identifier for this driver's jobs")
parser.add_option('--force-lock',
action="store_true",
dest="force_lock",
default=False,
help="ignore concurrent experiments using same exp_key (only do this after a crash)")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--poll-interval",
dest='poll_interval',
metavar='N',
default=None,
help="check work queue every N seconds (default: 5")
parser.add_option("--no-save-on-exit",
action="store_false",
dest="save_on_exit",
default=True,
help="save driver state to mongo on exit")
parser.add_option("--steps",
dest='steps',
default=sys.maxint,
help="exit after queuing this many jobs (default: inf)")
parser.add_option("--workdir",
dest="workdir",
default=os.path.expanduser('~/.hyperopt.workdir'),
help="direct hyperopt-mongo-worker to chdir here",
metavar="DIR")
parser.add_option("--block",
dest="block",
action="store_true",
default=False,
help="block return until all queue is empty")
parser.add_option("--bandit-argfile",
dest="bandit_argfile",
default=None,
help="path to file containing arguments bandit constructor\n"
"file format: pickle of dictionary containing two keys,\n"
" {'args' : tuple of positional arguments,\n"
" 'kwargs' : dictionary of keyword arguments}")
parser.add_option("--bandit-algo-argfile",
dest="bandit_algo_argfile",
default=None,
help="path to file containing arguments for bandit_algo "
"constructor. File format is pickled dictionary containing "
"two keys:\n"
" 'args', a tuple of positional arguments, and \n"
" 'kwargs', a dictionary of keyword arguments. \n"
"NOTE: instantiated bandit is pre-pended as first element"
" of arg tuple.")
parser.add_option("--max-queue-len",
dest="max_queue_len",
default=1,
help="maximum number of jobs to allow in queue")
(options, args) = parser.parse_args()
if len(args) > 2:
parser.print_help()
return -1
return main_search_helper(options, args)
def main_show_helper(options, args):
if options.trials_pkl:
trials = cPickle.load(open(options.trials_pkl))
else:
bandit_stuff = bandit_from_options(options)
bandit, (bandit_name, bandit_args, bandit_kwargs), bandit_algo_argfile\
= bandit_stuff
algo_stuff = algo_from_options(options, bandit)
algo, (algo_name, algo_args, algo_kwargs), algo_algo_argfile\
= algo_stuff
exp_key = expkey_from_options(options, bandit_stuff, algo_stuff)
trials = MongoTrials(as_mongo_str(options.mongo) + '/jobs', exp_key)
cmd = args[0]
if 'history' == cmd:
if 0:
import matplotlib.pyplot as plt
self.refresh_trials_results()
yvals, colors = zip(*[(1 - r.get('best_epoch_test', .5), 'g')
for y, r in zip(self.losses(), self.results) if y is not None])
plt.scatter(range(len(yvals)), yvals, c=colors)
return plotting.main_plot_history(trials)
elif 'histogram' == cmd:
return plotting.main_plot_histogram(trials)
elif 'dump' == cmd:
raise NotImplementedError('TODO: dump jobs db to stdout as JSON')
elif 'dump_pickle' == cmd:
cPickle.dump(trials_from_docs(trials.trials),
open(args[1], 'w'))
elif 'vars' == cmd:
return plotting.main_plot_vars(trials, bandit=bandit)
else:
logger.error("Invalid cmd %s" % cmd)
        print """Currently supported commands are history, histogram, dump_pickle, vars
"""
return -1
def main_show():
parser = optparse.OptionParser(
usage="%prog [options] cmd [...]")
parser.add_option("--exp-key",
dest='exp_key',
default = None,
metavar='str',
help="identifier for this driver's jobs")
parser.add_option("--bandit",
dest='bandit',
default = None,
metavar='json',
help="identifier for the bandit solved by the experiment")
parser.add_option("--bandit-argfile",
dest="bandit_argfile",
default=None,
help="path to file containing arguments bandit constructor\n"
"file format: pickle of dictionary containing two keys,\n"
" {'args' : tuple of positional arguments,\n"
" 'kwargs' : dictionary of keyword arguments}")
parser.add_option("--bandit-algo",
dest='bandit_algo',
default = None,
metavar='json',
help="identifier for the optimization algorithm for experiment")
parser.add_option("--bandit-algo-argfile",
dest="bandit_algo_argfile",
default=None,
help="path to file containing arguments for bandit_algo "
"constructor. File format is pickled dictionary containing "
"two keys:\n"
" 'args', a tuple of positional arguments, and \n"
" 'kwargs', a dictionary of keyword arguments. \n"
"NOTE: instantiated bandit is pre-pended as first element"
" of arg tuple.")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--trials",
dest="trials_pkl",
default="",
help="local trials file (e.g. created by dump_pickle command)")
parser.add_option("--workdir",
dest="workdir",
default=os.path.expanduser('~/.hyperopt.workdir'),
help="check for worker files here",
metavar="DIR")
(options, args) = parser.parse_args()
try:
cmd = args[0]
except:
parser.print_help()
return -1
return main_show_helper(options, args)
| gpl-3.0 |
sellberg/SACLA2016B8055 | scripts/03_plot_h5.py | 2 | 1294 | #!/home/doniach/dermen/epd731/bin/python
import numpy as np
import h5py
import matplotlib
import matplotlib.pyplot as plt
import argparse
import time
import pandas as pd
import sys
# -- default parameters
run = 448539
file_folder = '/UserData/fperakis/2016_6/01_test/' # h5 files folder
src_folder = '/home/fperakis/2016_06/python_scripts/src' # src files folder
# -- files and folders
file_name = '%d.h5'%(run)
file_path = file_folder+file_name
sys.path.insert(0, src_folder)
from img_class import *
# -- import data
fh5 = h5py.File(file_path, 'r')
run_key = [ k for k in fh5.keys() if k.startswith('run_') ][0]
tags = fh5['/%s/detector_2d_assembled_1'%run_key].keys()[1:]
# -- image generator
num_im = len(tags)
img_gen = ( fh5['%s/detector_2d_assembled_1/%s/detector_data'%(run_key,tag) ].value for tag in tags )
mean_int = np.zeros(num_im)
# -- average image
im = img_gen.next()
i=0
for im_next in img_gen:
t1 = time.time()
mean_int[i] = np.average(im_next.flatten())
im += im_next
i += 1
print 'R.%d | S.%d/%.d | %.1f Hz'%(run,i,num_im,1.0/(time.time() - t1))
im /= num_im
# -- run mean
total_mean = np.average(im.flatten())
# -- plot
title = 'r.%d - average %d shots'%(run,num_im)
i = img_class(im, title)
i.draw_img()
| bsd-2-clause |
pkruskal/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display first what a K-means algorithm would yield
using three clusters. It then shows the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (default is 10), the number of
times that the algorithm is run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver,
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
berkeley-stat159/project-epsilon | code/utils/scripts/t_test_plot_script.py | 1 | 3981 | """
Purpose:
-----------------------------------------------------------
This script creates t-test graphs for 4 conditions.
For each subject, each run, and each condition, it plots the t statistics.
-----------------------------------------------------------
"""
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
from t_stat import *
from smoothing import *
from matplotlib import colors
from plot_mosaic import *
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
import matplotlib
# Create the necessary directories if they do not exist
dirs = ['../../../fig','../../../fig/t-test']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
# locate the different paths
project_path = '../../../'
data_path = project_path + 'data/'
txt_path = project_path + 'txt_output/conv_high_res/'
#txt_path = project_path + 'txt_output/conv_normal/'
path_dict = {'data_filtered':{
'folder' : 'ds005/',
'bold_img_name' : 'filtered_func_data_mni.nii.gz',
'run_path' : 'model/model001/',
'feat' : '.feat/'
},
'data_original':{
'folder' : 'ds005/',
'bold_img_name' : 'bold.nii.gz',
'run_path' : 'BOLD/',
'feat' : '/'
}}
# TODO: uncomment for final version
#subject_list = [str(i) for i in range(1,17)]
subject_list = ['1','5']
run_list = [str(i) for i in range(1,2)]
cond_list = [str(i) for i in range(1,5)]
#TODO: Change to relevant path for data or other thing
d = path_dict['data_original']
#OR
#d = path_dict['data_filtered']
images_paths = [('ds005' +'_sub' + s.zfill(3) + '_t1r' + r, \
data_path + d['folder'] + 'sub%s/'%(s.zfill(3)) + d['run_path'] \
+ 'task001_run%s'%(r.zfill(3))+d['feat']+'%s'%( d['bold_img_name'])) \
for r in run_list \
for s in subject_list]
print("\n=====================================================")
thres = 375 #from analysis of the histograms
for image_path in images_paths:
name = image_path[0]
print("Starting t-test analysis and plot for subject "+name[9:12])
img = nib.load(image_path[1])
data_int = img.get_data()
data = data_int.astype(float)
vol_shape = data.shape[:-1]
n_trs = data.shape[-1]
#get the mean value
mean_data = np.mean(data, axis = -1)
#build the mask
in_brain_mask = mean_data > 375
#smooth the data set
smooth_data = smoothing(data, 1, range(n_trs))
#initialize design matrix for t test
p = 7
X_matrix = np.ones((data.shape[-1], p))
#build our design matrix
for cond in range(1,5):
convolved = np.loadtxt(txt_path + name + '_conv_' + str(cond).zfill(3) + '_high_res.txt')
#convolved = np.loadtxt(txt_path + name + '_conv_' + str(cond).zfill(3) + '_canonical.txt')
X_matrix[:,cond] = convolved
linear_drift = np.linspace(-1, 1, n_trs)
X_matrix[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X_matrix[:,6] = quadratic_drift
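    # Design-matrix layout as built above (descriptive note): column 0 is the
    # constant/intercept term (ones), columns 1-4 hold the four convolved task
    # regressors, column 5 the linear drift and column 6 the quadratic drift.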
beta, t, df, p = t_stat(smooth_data, X_matrix)
for cond in range(0,4):
print("Starting test for condition " + str(cond+1))
t_newshape = np.reshape(t[cond,:],vol_shape)
t_newshape[~in_brain_mask]=np.nan
t_T = np.zeros(vol_shape)
for z in range(vol_shape[2]):
t_T[:, :, z] = t_newshape[:,:, z].T
t_plot = plot_mosaic(t_T)
plt.imshow(t_plot,interpolation='nearest', cmap='seismic')
zero_out=max(abs(np.nanmin(t_T)),np.nanmax(t_T))
plt.title(name+'_t_statistics'+'_cond_'+'_%s'%(cond+1))
plt.clim(-zero_out,zero_out)
plt.colorbar()
plt.savefig(dirs[1]+'/'+ name +'_t-test_'+'cond'+str(cond+1)+'.png')
plt.close()
print("\nT-test analysis and plots done for selected subjects")
print("See mosaic plots in project-epsilon/fig/t-test/")
| bsd-3-clause |
arvinsahni/ml4 | flask/app/vizarvin.py | 2 | 9900 | from __future__ import division
from flask import render_template, request, Response, jsonify,redirect,url_for,flash
from werkzeug.utils import secure_filename
from app import app
import pandas
from pandas.util import hash_pandas_object
import json
import psycopg2
import psycopg2.extras
import os
import pandas as pd
import hashlib
import datetime
from datetime import date
import numpy as np
TRAINING_DATA={}
TESTING_DATA={}
ALLOWED_EXTENSIONS=set(['txt','csv'])
SECRET_KEY='ml4all'
app.secret_key='ml4all'
@app.route('/index')
def index():
return render_template('home.html')
@app.route('/viz')
def viz():
return render_template('viz.html')
def to_csv(d, fields):
d.insert(0, fields)
return Response('\n'.join([",".join(map(str, e)) for e in d]), mimetype='text/csv')
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
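# Illustrative behaviour of allowed_file with the extensions defined above:
#   allowed_file('train.csv')  -> True
#   allowed_file('train.xlsx') -> False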
@app.route('/')
def datset():
return render_template('home.html')
@app.route('/dataset',methods=['POST'])
def upload_file():
train_file_name = 'train'
test_file_name ='test'
error=None
if request.method == 'POST':
# check if the post request has the file part
if train_file_name not in request.files or test_file_name not in request.files:
#flash('No file part')
error='Kindly upload both training and testing files'
#print("helllllo")
#print(request.url)
flash("load files")
#return redirect(request.url)
return render_template('home.html',error=error)
file = request.files[train_file_name]
        # if the user does not select a file, the browser
        # submits an empty part without a filename
if file.filename == '':
print("hiiio")
print(request.url)
error='Kindly upload both training and testing files'
flash('No selected files')
return redirect(request.url)
#return render_template('home.html',error=error)
if file and allowed_file(file.filename):
flash("training file uplaoded")
filename = secure_filename(file.filename)
print(os.path.abspath(os.path.join('app/','uploads/')))
#file.save(os.path.abspath(os.path.join('app/',app.config['UPLOAD_FOLDER'], filename)))
file.save(os.path.abspath(os.path.join('app/','uploads/', filename)))
print("done")
## convert file to pandas dataframe
#df_train=pd.read_csv(os.path.join('app/',app.config['UPLOAD_FOLDER'], filename))
df_train=pd.read_csv(os.path.join('app/','uploads/', filename))
print("df_train1",df_train.head(5))
## hash the pd , change to binary --> get fom Jason
temp_hash=hash_pandas_object(df_train)
hash_train = hashlib.sha256(str(temp_hash).encode('utf-8','ignore')).hexdigest()
print("hash train1",hash_train)
## update dict ---> key:hash ,value: dataframe
#TRAINING_DATA[hash_train]=df_train
## For the test file
file = request.files[test_file_name]
        # if the user does not select a file, the browser
        # submits an empty part without a filename
if file.filename == '':
            print(request.url)
flash('No selected files')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
#file.save(os.path.abspath(os.path.join('app/',app.config['UPLOAD_FOLDER'], filename)))
file.save(os.path.abspath(os.path.join('app/','uploads/', filename)))
## convert file to pandas dataframe
#df_test=pd.read_csv(os.path.join('app/',app.config['UPLOAD_FOLDER'], filename))
df_test=pd.read_csv(os.path.join('app/','uploads/', filename))
print("df test1",df_test.head(5))
## hash the pd , change to binary --> get fom Jason
temp_hash=hash_pandas_object(df_test)
hash_test = hashlib.sha256(str(temp_hash).encode('utf-8','ignore')).hexdigest()
print("test1",hash_test)
## update dict ---> key:hash ,value: dataframe
if df_train.shape[1]==(df_test.shape[1]-1):
temp=hash_test
hash_test=hash_train
hash_train=temp
temp_df=df_test
df_test=df_train
df_train=temp_df
TESTING_DATA[hash_test]=df_test
TRAINING_DATA[hash_train]=df_train
print("hash train2",hash_train)
print("hash test2",hash_test)
print("df train2",df_train)
print("df_test2",df_test)
flash("Uploaded files all training")
#return redirect('home.html')
#return jsonify({"hash":hash})
#return redirect(request.url)
return redirect(url_for('datset'))
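# Illustrative upload request against this endpoint (host/port assume the
# default Flask development server; file names are hypothetical):
#   curl -F "train=@train.csv" -F "test=@test.csv" http://127.0.0.1:5000/dataset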
## may look to add another app.route for test data hash but later
#@app.route('/dataset_test',methods=['POST'])
#def upload_testfile():
# file_name = 'test[]'
#
# if request.method == 'POST':
#
# # check if the post request has the file part
# if file_name not in request.files:
# print(request.files)
# flash('No file part')
# return redirect(request.url)
#
# file = request.files[file_name]
#
# print (file.filename)
# # if user does not select file, browser also
# # submit a empty part without filename
# if file.filename == '':
#
# flash('No selected files')
# return redirect(request.url)
# if file and allowed_file(file.filename):
#
# filename = secure_filename(file.filename)
#
# print(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# print(os.getcwd())
# file.save(os.path.join('app/',app.config['UPLOAD_FOLDER'], filename))
#
# ## convert file to pandas dataframe
#
# df_test=pd.read_csv(os.path.join('app/',app.config['UPLOAD_FOLDER'], filename))
# print(df_test.head(5))
# ## hash the pd , change to binary --> get fom Jason
# temp_hash_test=hash_pandas_object(df_test)
# print(temp_hash_test)
# testing_data_hash = hashlib.sha256(str(temp_hash_test).encode('utf-8','ignore')).hexdigest()
# print(testing_data_hash)
# ## update dict ---> key:hash ,value: dataframe
# TESTING_DATA[temp_hash_test]=df_test
# return jsonify({"test_data_hash":testing_data_hash})
BASIC_STATS = {}
##replace with actual function
def jacky_function(df):
return date.today(),1,len(list(df))
@app.route('/basic-stats/<hash>',methods=['GET'])
def basic_stat(hash):
## step 1 if hash in BASIC_STATS return jsonify(BASIC_STATS[hash])
## else step 2
## pull in training data
## compute basic stats(basically call Jacky's function) add results to dictionary BASIC_STATS, return jsonify(BASIC_STATS[hash])
## which is basically {"metadata": {"date": <ISO Format>, "version": <int>}, "data": {<data collection 1>: {}, <data collection 2>: {}, ...}}
print(TRAINING_DATA)
if hash in BASIC_STATS:
        return jsonify(BASIC_STATS[hash])
# error can be sent the same way jsonify(BASIC_STATS[error])
else:
#for key,value in TRAINING_DATA.items():
#print (key,value)
        train_df=TRAINING_DATA[hash]
        date_fn,version_fn,stats=jacky_function(train_df)
        BASIC_STATS[hash]=stats
return jsonify({"metadata":{"date":str(date_fn),"version":version_fn},"data":stats})
## Prediction stats work the same way as basic stats except i need to call Jason's function instead of Jacky's function
## this would need a MODELS dictionary - key is the hash value, value is the model we train
## input to a function that Jason will write ---> model ( from the MODELS dictionary)
## output would be {"metadata": {"date": <ISO Format>, "version": <int>}, "data": {"technical_scores": [{"name": "AUC", "value": .867}, {"name": "Accuracy", "value": "79%"}], <data collection 2>: {}, ...}}
# ( inform JAcky of the structure --how its returned)
MODELS={}
##replace with actual function
sample={}
temp={}
sample["technical_scores"]=[]
def jason_function(df):
return date.today(),100,len(list(df))
@app.route('/prediction-stats/<hash>',methods=['GET'])
def prediction_stat(hash):
print(TRAINING_DATA)
if hash in MODELS:
        return jsonify(MODELS[hash])
else:
train_df=TRAINING_DATA[hash]
date_fn,version_fn,pred_stats=jason_function(train_df)
MODELS[hash]=pred_stats
return jsonify({"metadata":{"date":str(date_fn),"version":version_fn},"data":pred_stats})
## test data prediction
#replace by actual code
def jason_model_creation(hash):
return 100
def jason_prediction(model_saved,hash,testing_data_hash):
return 200
## this one should return a df
def jason_add_pred_to_test(pred,testing_data_hash,hash):
return pd.DataFrame(np.random.randn(10, 5))
from flask import send_from_directory
MODELS_SAVED={}
## the below is for checking stuff only
#TESTING_DATA["e0d47420dd0157af6af54d64b14f348f1fada3c050a73cd50fad2716a38fc2b2"]=1234
@app.route('/predict/<hash>/<testing_data_hash>',methods=['GET'])
def prediction_test(hash,testing_data_hash):
print("step1")
print(TRAINING_DATA)
for key, value in TRAINING_DATA.items() :
print (key, value)
if hash not in MODELS_SAVED:
train_df=TRAINING_DATA[hash]
test_df=TESTING_DATA[testing_data_hash]
temp=jason_model_creation(hash)
## replace above based on actual model
MODELS_SAVED[hash]=temp
print("step 2")
pred=jason_prediction(MODELS_SAVED[hash],hash,testing_data_hash)
pred_df=jason_add_pred_to_test(pred,testing_data_hash,hash)
print("step 3")
pred_filename="abcd.csv" # need to have some component of date,version etc
pred_df.to_csv(pred_filename)
print(os.getcwd())
#pred_filename = secure_filename(pred_filename)
#file.save(os.path.join('app/',app.config['UPLOAD_FOLDER'], pred_filename))
#return send_from_directory(app.config['UPLOAD_FOLDER'],pred_filename)
print("step 4")
return send_from_directory(os.getcwd(),pred_filename)
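# Illustrative request flow for this endpoint (hash values are hypothetical
# SHA-256 digests returned by the upload step):
#   GET /predict/<train_hash>/<test_hash>  ->  CSV file with predictions
#   e.g. curl http://127.0.0.1:5000/predict/ab12.../cd34... -o predictions.csv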
## may add 2 more app.routes
| mit |
MatthieuBizien/scikit-learn | examples/exercises/plot_cv_digits.py | 135 | 1223 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
benitesf/Skin-Lesion-Analysis-Towards-Melanoma-Detection | main.py | 1 | 9476 | # Import methods of features extraction
from features_extraction.feature_extraction import FeatureExtraction
# Import methods of learning
from learning.learning import neural_network
# Import methods of classification
from classification.classification import classify, confusion_matrix, total_error, local_error
#
from skimage import io
from PIL import Image
# Import util methods
from sklearn.model_selection import train_test_split
import util.dirhandler as dh
import config as cfg
import numpy as np
import time
import sys
"""
Get train and test set
----------------------
"""
all_melanoma = sorted(dh.get_file_name_dir(cfg.melanoma_path, cfg.melanoma_extension))
all_ground = sorted(dh.get_file_name_dir(cfg.ground_path, cfg.ground_extension))
melanoma_train, melanoma_test, ground_train, ground_test = train_test_split(all_melanoma, all_ground, test_size=0.25,
random_state=25)
"""
----------------------
"""
"""
Feature Extraction
------------------
"""
feature = FeatureExtraction()
start_t = time.time()
X, y = feature.second_method(melanoma_train, ground_train)
feature_t = (time.time() - start_t)/60 # minutes
"""
------------------
"""
"""
Training Neural Network
-----------------------
"""
# Training the neural network with 83.3 % of the array features
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.16666)
classifier = neural_network()
start_t = time.time()
classifier.fit(X_train, y_train)
classifier_t = (time.time() - start_t)/60 # minutes
score_test = classifier.score(X_test, y_test)
score_train = classifier.score(X_train, y_train)
"""
-----------------------
"""
"""
Classify test images
---------------
"""
melanoma_list = melanoma_test
ground_list = ground_test
seg, tim, dim = classify(melanoma_list, ground_list, feature, classifier, block=True)
"""
---------------
"""
"""
Accuracy
---------
"""
confmat = confusion_matrix(seg, ground_list)
local_err = local_error(confmat)
sensitivity, specificity, accuracy = total_error(local_err)
"""
---------
"""
"""
Measure of times of execution
-----------------------------
"""
tim = np.array(tim) # sec
dim = np.array(dim)
dim = dim[0:,0] * dim[0:,1]
t_by_pix = (tim*(10**6)) / dim # microsec / pix
tim /= 60 # min
total_time = (tim/60).sum() # total hours
mean_time = tim.mean() # mean minutes
std_time = tim.std() # std minutes
"""
-----------------------------
"""
"""
Saving values
-------------
"""
files = [f.split('.')[0]+'_classified.jpg' for f in melanoma_list]
path_save = 'resultados/red3/preprocesado/test/'
for s, f in zip(seg, files):
img = Image.fromarray(s)
img.convert('L').save(path_save + f)
with open(path_save + 'Measures.txt', 'w') as output:
output.write('---------------\n')
output.write('---- RED 3 ----\n')
output.write('---------------\n\n')
output.write('Data Base: ' + cfg.melanoma_path + '\n')
output.write('Number of images: ' + str(cfg.nImage) + '\n')
output.write('Number of fields: ' + str(cfg.nCells) + '\n')
output.write('Number of images to train: ' + str(len(melanoma_train)) + '\n')
    output.write('Number of images to test: ' + str(len(melanoma_test)) + '\n')
output.write('Size of Train from Train_Images: ' + str(X_train.shape) + '\n')
output.write('Size of Test from Train_Images: ' + str(X_test.shape) + '\n')
output.write('Type of segmentation: block\n\n')
output.write(classifier.__str__()+'\n\n')
output.write('Final function value: ' + str(classifier.loss_)+'\n\n')
output.write('-------------------------------------------------------------------------\n')
output.write('Time of execution: \n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Feature Extraction: \n')
output.write('\tTime: ' + str(feature_t) + ' min\n')
output.write('Neural Network Training:\n')
output.write('\tTime: ' + str(classifier_t) + ' min\n')
output.write('Segmentation by image:\n')
output.write('\tTotal: ' + str(total_time) + ' hrs\n')
output.write('\tMean: ' + str(mean_time) + '+-' + str(std_time) + ' min\n')
output.write('Segmentation by pixel:\n')
    output.write('\tMean: ' + str(t_by_pix.mean()) + '+-' + str(t_by_pix.std()) + ' microsec/pix\n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Score:\n')
output.write('\tX_train: ' + str(score_train) + '\n')
output.write('\tX_test: ' + str(score_test) + '\n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Total error\n')
output.write('\tSensitivity: ' + str(sensitivity[0]) + '+-' + str(sensitivity[1]) + '\n')
output.write('\tSpecificity: ' + str(specificity[0]) + '+-' + str(specificity[1]) + '\n')
output.write('\tAccuracy: ' + str(accuracy[0]) + '+-' + str(accuracy[1]) + '\n')
output.write('-------------------------------------------------------------------------\n\n')
    output.write('Total number of pixels: ' + str(dim.sum()) + '\n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Local error: \n')
output.write('\t[TP\tFP\tFN\tTN]|[sensitivity, specificity, accuracy]\t\n')
for a, g, l, t, d in zip(confmat, ground_list, local_err, tim, dim):
output.write(str(a) + '\t' + g + '\t' + str(l) + '\t' + str(t) + ' min' + '\t' + str(d) + ' pix\n')
"""
-------------
"""
"""
Classify train images
---------------------
"""
melanoma_list = melanoma_train
ground_list = ground_train
seg, tim, dim = classify(melanoma_list, ground_list, feature, classifier, block=True)
"""
---------------------
"""
"""
Accuracy
---------
"""
confmat = confusion_matrix(seg, ground_list)
local_err = local_error(confmat)
sensitivity, specificity, accuracy = total_error(local_err)
"""
---------
"""
"""
Measure of times of execution
-----------------------------
"""
tim = np.array(tim) # sec
dim = np.array(dim)
dim = dim[0:,0] * dim[0:,1]
t_by_pix = (tim*(10**6)) / dim # microsec / pix
tim /= 60 # min
total_time = (tim/60).sum() # total hours
mean_time = tim.mean() # mean minutes
std_time = tim.std() # std minutes
"""
-----------------------------
"""
"""
Saving values
-------------
"""
files = [f.split('.')[0]+'_classified.jpg' for f in melanoma_list]
path_save = 'resultados/red3/preprocesado/train/'
for s, f in zip(seg, files):
img = Image.fromarray(s)
img.convert('L').save(path_save + f)
with open(path_save + 'Measures.txt', 'w') as output:
output.write('---------------\n')
output.write('---- RED 3 ----\n')
output.write('---------------\n\n')
output.write('Data Base: ' + cfg.melanoma_path + '\n')
output.write('Number of images: ' + str(cfg.nImage) + '\n')
output.write('Number of fields: ' + str(cfg.nCells) + '\n')
output.write('Number of images to train: ' + str(len(melanoma_train)) + '\n')
    output.write('Number of images to test: ' + str(len(melanoma_test)) + '\n')
output.write('Size of Train from Train_Images: ' + str(X_train.shape) + '\n')
output.write('Size of Test from Train_Images: ' + str(X_test.shape) + '\n')
output.write('Type of segmentation: block\n\n')
output.write(classifier.__str__()+'\n\n')
output.write('Final function value: ' + str(classifier.loss_)+'\n\n')
output.write('-------------------------------------------------------------------------\n')
output.write('Time of execution: \n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Feature Extraction: \n')
output.write('\tTime: ' + str(feature_t) + ' min\n')
output.write('Neural Network Training:\n')
output.write('\tTime: ' + str(classifier_t) + ' min\n')
output.write('Segmentation by image:\n')
output.write('\tTotal: ' + str(total_time) + ' hrs\n')
output.write('\tMean: ' + str(mean_time) + '+-' + str(std_time) + ' min\n')
output.write('Segmentation by pixel:\n')
    output.write('\tMean: ' + str(t_by_pix.mean()) + '+-' + str(t_by_pix.std()) + ' microsec/pix\n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Score:\n')
output.write('\tX_train: ' + str(score_train) + '\n')
output.write('\tX_test: ' + str(score_test) + '\n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Total error\n')
output.write('\tSensitivity: ' + str(sensitivity[0]) + '+-' + str(sensitivity[1]) + '\n')
output.write('\tSpecificity: ' + str(specificity[0]) + '+-' + str(specificity[1]) + '\n')
output.write('\tAccuracy: ' + str(accuracy[0]) + '+-' + str(accuracy[1]) + '\n')
output.write('-------------------------------------------------------------------------\n\n')
    output.write('Total number of pixels: ' + str(dim.sum()) + '\n')
output.write('-------------------------------------------------------------------------\n\n')
output.write('Local error: \n')
output.write('\t[TP\tFP\tFN\tTN]|[sensitivity, specificity, accuracy]\t\n')
for a, g, l, t, d in zip(confmat, ground_list, local_err, tim, dim):
output.write(str(a) + '\t' + g + '\t' + str(l) + '\t' + str(t) + ' min' + '\t' + str(d) + ' pix\n')
"""
-------------
"""
| mit |
gprMax/gprMax | setup.py | 1 | 7984 | # Copyright (C) 2015-2020: The University of Edinburgh
# Authors: Craig Warren and Antonis Giannopoulos
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
try:
import numpy as np
except ImportError:
raise ImportError('gprMax requires the NumPy package.')
import glob
import os
import pathlib
import re
import shutil
import sys
# Importing _version__.py before building can cause issues.
with open('gprMax/_version.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
# Parse package name from init file. Importing __init__.py / gprMax will break as gprMax depends on compiled .pyx files.
with open('gprMax/__init__.py', 'r') as fd:
packagename = re.search(r'^__name__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
packages = [packagename, 'tests', 'tools', 'user_libs']
# Parse long_description from README.rst file.
with open('README.rst','r') as fd:
long_description = fd.read()
# Python version
if sys.version_info[:2] < (3, 4):
sys.exit('\nExited: Requires Python 3.4 or newer!\n')
# Process 'build' command line argument
if 'build' in sys.argv:
print("Running 'build_ext --inplace'")
sys.argv.remove('build')
sys.argv.append('build_ext')
sys.argv.append('--inplace')
# Process '--no-cython' command line argument - either Cythonize or just compile the .c files
if '--no-cython' in sys.argv:
USE_CYTHON = False
sys.argv.remove('--no-cython')
else:
USE_CYTHON = True
# Build a list of all the files that need to be Cythonized looking in gprMax directory
cythonfiles = []
for root, dirs, files in os.walk(os.path.join(os.getcwd(), packagename), topdown=True):
for file in files:
if file.endswith('.pyx'):
cythonfiles.append(os.path.relpath(os.path.join(root, file)))
# Process 'cleanall' command line argument - cleanup Cython files
if 'cleanall' in sys.argv:
USE_CYTHON = False
for file in cythonfiles:
filebase = os.path.splitext(file)[0]
# Remove Cython C files
if os.path.isfile(filebase + '.c'):
try:
os.remove(filebase + '.c')
print('Removed: {}'.format(filebase + '.c'))
except OSError:
print('Could not remove: {}'.format(filebase + '.c'))
# Remove compiled Cython modules
libfile = glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.pyd') + glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.so')
if libfile:
libfile = libfile[0]
try:
os.remove(libfile)
print('Removed: {}'.format(os.path.abspath(libfile)))
except OSError:
print('Could not remove: {}'.format(os.path.abspath(libfile)))
# Remove build, dist, egg and __pycache__ directories
shutil.rmtree(os.path.join(os.getcwd(), 'build'), ignore_errors=True)
shutil.rmtree(os.path.join(os.getcwd(), 'dist'), ignore_errors=True)
shutil.rmtree(os.path.join(os.getcwd(), 'gprMax.egg-info'), ignore_errors=True)
for p in pathlib.Path(os.getcwd()).rglob('__pycache__'):
shutil.rmtree(p, ignore_errors=True)
print('Removed: {}'.format(p))
# Now do a normal clean
sys.argv[1] = 'clean' # this is what distutils understands
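# Typical invocations implied by the argument handling above (illustrative):
#   python setup.py build              # cythonize and compile extensions in place
#   python setup.py build --no-cython  # compile the pre-generated .c files only
#   python setup.py cleanall           # remove generated C files, compiled modules and build dirs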
# Set compiler options
# Windows
if sys.platform == 'win32':
compile_args = ['/O2', '/openmp', '/w'] # No static linking as no static version of OpenMP library; /w disables warnings
linker_args = []
extra_objects = []
libraries=[]
# Mac OS X - needs gcc (usually via HomeBrew) because the default compiler LLVM (clang) does not support OpenMP
# - with gcc -fopenmp option implies -pthread
elif sys.platform == 'darwin':
gccpath = glob.glob('/usr/local/bin/gcc-[4-9]*')
gccpath += glob.glob('/usr/local/bin/gcc-[10-11]*')
if gccpath:
# Use newest gcc found
os.environ['CC'] = gccpath[-1].split(os.sep)[-1]
rpath = '/usr/local/opt/gcc/lib/gcc/' + gccpath[-1].split(os.sep)[-1][-1] + '/'
else:
        raise RuntimeError('Cannot find gcc 4-10 in /usr/local/bin. gprMax requires gcc to be installed - easily done through the Homebrew package manager (http://brew.sh). Note: gcc with OpenMP support is required.')
compile_args = ['-O3', '-w', '-fopenmp', '-march=native'] # Sometimes worth testing with '-fstrict-aliasing', '-fno-common'
linker_args = ['-fopenmp', '-Wl,-rpath,' + rpath]
libraries = ['iomp5', 'pthread']
extra_objects = []
# Linux
elif sys.platform == 'linux':
compile_args = ['-O3', '-w', '-fopenmp', '-march=native']
linker_args = ['-fopenmp']
extra_objects = []
libraries=[]
# Build a list of all the extensions
extensions = []
for file in cythonfiles:
tmp = os.path.splitext(file)
if USE_CYTHON:
fileext = tmp[1]
else:
fileext = '.c'
extension = Extension(tmp[0].replace(os.sep, '.'),
[tmp[0] + fileext],
language='c',
include_dirs=[np.get_include()],
libraries=libraries,
extra_compile_args=compile_args,
extra_link_args=linker_args,
extra_objects=extra_objects)
extensions.append(extension)
# Cythonize (build .c files)
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions,
compiler_directives={
'boundscheck': False,
'wraparound': False,
'initializedcheck': False,
'embedsignature': True,
'language_level': 3
},
annotate=False)
# SetupTools Required to make package
import setuptools
setup(name=packagename,
version=version,
author='Craig Warren and Antonis Giannopoulos',
url='http://www.gprmax.com',
description='Electromagnetic Modelling Software based on the Finite-Difference Time-Domain (FDTD) method',
long_description=long_description,
long_description_content_type="text/x-rst",
license='GPLv3+',
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'
],
#requirements
python_requires=">3.6",
install_requires=[
"colorama",
"cython",
"h5py",
"jupyter",
"matplotlib",
"numpy",
"psutil",
"scipy",
"terminaltables",
"tqdm",
],
ext_modules=extensions,
packages=packages,
include_package_data=True,
include_dirs=[np.get_include()],
zip_safe=False)
| gpl-3.0 |
adamrvfisher/TechnicalAnalysisLibrary | Blankrunningslate.py | 1 | 1167 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 5 20:13:42 2017
@author: AmatVictoriaCuramIII
"""
firsttime = '07/01/1983'
secondtime = '01/01/1995'
thirdtime = '01/01/2006'
fourthtime = '01/01/2010'
lasttime = '01/01/2050'
ticker = '^GSPC'
import pandas as pd
from ChaikinAggMaker import ChaikinAggMaker
S1TS = pd.read_pickle('SP500NCS1TS')
S2TS = pd.read_pickle('SP500NCS2TS')
S3TS = pd.read_pickle('SP500NCS3TS')
S4TS = pd.read_pickle('SP500NCS4TS')
S1TS = S1TS.loc[:,~S1TS.columns.duplicated()]
S2TS = S2TS.loc[:,~S2TS.columns.duplicated()]
S3TS = S3TS.loc[:,~S3TS.columns.duplicated()]
S4TS = S4TS.loc[:,~S4TS.columns.duplicated()]
testset1winners = ChaikinAggMaker(ticker, S1TS, firsttime, secondtime)
testset2winners = ChaikinAggMaker(ticker, S2TS, secondtime, thirdtime)
testset3winners = ChaikinAggMaker(ticker, S3TS, thirdtime, lasttime)
testset4winners = ChaikinAggMaker(ticker, S4TS, fourthtime, lasttime)
Aggregate = pd.DataFrame()
Aggregate = pd.concat([Aggregate, testset1winners, testset2winners,
testset3winners, testset4winners],axis = 1)
Aggregate = Aggregate.loc[:,~Aggregate.columns.duplicated()]
| apache-2.0 |
aflaxman/scikit-learn | sklearn/__check_build/__init__.py | 13 | 1679 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build # noqa
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/graphics/functional.py | 31 | 14477 | """Module for functional boxplots."""
from statsmodels.compat.python import combinations, range
import numpy as np
from scipy import stats
from scipy.misc import factorial
from . import utils
__all__ = ['fboxplot', 'rainbowplot', 'banddepth']
def fboxplot(data, xdata=None, labels=None, depth=None, method='MBD',
wfactor=1.5, ax=None, plot_opts={}):
"""Plot functional boxplot.
A functional boxplot is the analog of a boxplot for functional data.
Functional data is any type of data that varies over a continuum, i.e.
    curves, probability distributions, seasonal data, etc.
The data is first ordered, the order statistic used here is `banddepth`.
Plotted are then the median curve, the envelope of the 50% central region,
the maximum non-outlying envelope and the outlier curves.
Parameters
----------
data : sequence of ndarrays or 2-D ndarray
The vectors of functions to create a functional boxplot from. If a
sequence of 1-D arrays, these should all be the same size.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
xdata : ndarray, optional
The independent variable for the data. If not given, it is assumed to
be an array of integers 0..N with N the length of the vectors in
`data`.
labels : sequence of scalar or str, optional
The labels or identifiers of the curves in `data`. If given, outliers
are labeled in the plot.
depth : ndarray, optional
A 1-D array of band depths for `data`, or equivalent order statistic.
If not given, it will be calculated through `banddepth`.
method : {'MBD', 'BD2'}, optional
The method to use to calculate the band depth. Default is 'MBD'.
wfactor : float, optional
Factor by which the central 50% region is multiplied to find the outer
region (analog of "whiskers" of a classical boxplot).
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
plot_opts : dict, optional
A dictionary with plotting options. Any of the following can be
provided, if not present in `plot_opts` the defaults will be used::
- 'cmap_outliers', a Matplotlib LinearSegmentedColormap instance.
- 'c_inner', valid MPL color. Color of the central 50% region
- 'c_outer', valid MPL color. Color of the non-outlying region
- 'c_median', valid MPL color. Color of the median.
- 'lw_outliers', scalar. Linewidth for drawing outlier curves.
- 'lw_median', scalar. Linewidth for drawing the median curve.
- 'draw_nonout', bool. If True, also draw non-outlying curves.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
depth : ndarray
1-D array containing the calculated band depths of the curves.
ix_depth : ndarray
1-D array of indices needed to order curves (or `depth`) from most to
least central curve.
ix_outliers : ndarray
1-D array of indices of outlying curves in `data`.
See Also
--------
banddepth, rainbowplot
Notes
-----
The median curve is the curve with the highest band depth.
Outliers are defined as curves that fall outside the band created by
multiplying the central region by `wfactor`. Note that the range over
which they fall outside this band doesn't matter, a single data point
outside the band is enough. If the data is noisy, smoothing may therefore
be required.
The non-outlying region is defined as the band made up of all the
non-outlying curves.
References
----------
[1] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of Computational
and Graphical Statistics, vol. 20, pp. 1-19, 2011.
[2] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots for
Functional Data", vol. 19, pp. 29-25, 2010.
Examples
--------
Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea
surface temperature data.
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> data = sm.datasets.elnino.load()
Create a functional boxplot. We see that the years 1982-83 and 1997-98 are
outliers; these are the years where El Nino (a climate pattern
characterized by warming up of the sea surface and higher air pressures)
occurred with unusual intensity.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> res = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,
... labels=data.raw_data[:, 0].astype(int),
... ax=ax)
>>> ax.set_xlabel("Month of the year")
>>> ax.set_ylabel("Sea surface temperature (C)")
>>> ax.set_xticks(np.arange(13, step=3) - 1)
>>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
>>> ax.set_xlim([-0.2, 11.2])
>>> plt.show()
.. plot:: plots/graphics_functional_fboxplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if plot_opts.get('cmap_outliers') is None:
from matplotlib.cm import rainbow_r
plot_opts['cmap_outliers'] = rainbow_r
data = np.asarray(data)
if xdata is None:
xdata = np.arange(data.shape[1])
# Calculate band depth if required.
if depth is None:
if method not in ['MBD', 'BD2']:
raise ValueError("Unknown value for parameter `method`.")
depth = banddepth(data, method=method)
else:
if depth.size != data.shape[0]:
raise ValueError("Provided `depth` array is not of correct size.")
# Inner area is 25%-75% region of band-depth ordered curves.
ix_depth = np.argsort(depth)[::-1]
median_curve = data[ix_depth[0], :]
ix_IQR = data.shape[0] // 2
lower = data[ix_depth[0:ix_IQR], :].min(axis=0)
upper = data[ix_depth[0:ix_IQR], :].max(axis=0)
# Determine region for outlier detection
inner_median = np.median(data[ix_depth[0:ix_IQR], :], axis=0)
lower_fence = inner_median - (inner_median - lower) * wfactor
upper_fence = inner_median + (upper - inner_median) * wfactor
# Find outliers.
ix_outliers = []
ix_nonout = []
for ii in range(data.shape[0]):
if np.any(data[ii, :] > upper_fence) or np.any(data[ii, :] < lower_fence):
ix_outliers.append(ii)
else:
ix_nonout.append(ii)
ix_outliers = np.asarray(ix_outliers)
# Plot envelope of all non-outlying data
lower_nonout = data[ix_nonout, :].min(axis=0)
upper_nonout = data[ix_nonout, :].max(axis=0)
ax.fill_between(xdata, lower_nonout, upper_nonout,
color=plot_opts.get('c_outer', (0.75,0.75,0.75)))
# Plot central 50% region
ax.fill_between(xdata, lower, upper,
color=plot_opts.get('c_inner', (0.5,0.5,0.5)))
# Plot median curve
ax.plot(xdata, median_curve, color=plot_opts.get('c_median', 'k'),
lw=plot_opts.get('lw_median', 2))
# Plot outliers
cmap = plot_opts.get('cmap_outliers')
for ii, ix in enumerate(ix_outliers):
label = str(labels[ix]) if labels is not None else None
ax.plot(xdata, data[ix, :],
                color=cmap(float(ii) / max(len(ix_outliers) - 1, 1)), label=label,
lw=plot_opts.get('lw_outliers', 1))
if plot_opts.get('draw_nonout', False):
for ix in ix_nonout:
ax.plot(xdata, data[ix, :], 'k-', lw=0.5)
if labels is not None:
ax.legend()
return fig, depth, ix_depth, ix_outliers
def rainbowplot(data, xdata=None, depth=None, method='MBD', ax=None,
cmap=None):
"""Create a rainbow plot for a set of curves.
A rainbow plot contains line plots of all curves in the dataset, colored in
order of functional depth. The median curve is shown in black.
Parameters
----------
data : sequence of ndarrays or 2-D ndarray
The vectors of functions to create a functional boxplot from. If a
sequence of 1-D arrays, these should all be the same size.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
xdata : ndarray, optional
The independent variable for the data. If not given, it is assumed to
be an array of integers 0..N with N the length of the vectors in
`data`.
depth : ndarray, optional
A 1-D array of band depths for `data`, or equivalent order statistic.
If not given, it will be calculated through `banddepth`.
method : {'MBD', 'BD2'}, optional
The method to use to calculate the band depth. Default is 'MBD'.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used for plotting instead of creating a new
        figure.
cmap : Matplotlib LinearSegmentedColormap instance, optional
The colormap used to color curves with. Default is a rainbow colormap,
with red used for the most central and purple for the least central
curves.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
banddepth, fboxplot
References
----------
    .. [1] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots
       for Functional Data", Journal of Computational and Graphical Statistics,
       vol. 19, pp. 29-45, 2010.
Examples
--------
    Load the El Nino dataset, which consists of 60 years' worth of Pacific Ocean
    sea surface temperature data.
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> data = sm.datasets.elnino.load()
Create a rainbow plot:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> res = sm.graphics.rainbowplot(data.raw_data[:, 1:], ax=ax)
>>> ax.set_xlabel("Month of the year")
>>> ax.set_ylabel("Sea surface temperature (C)")
>>> ax.set_xticks(np.arange(13, step=3) - 1)
>>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
>>> ax.set_xlim([-0.2, 11.2])
>>> plt.show()
.. plot:: plots/graphics_functional_rainbowplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if cmap is None:
from matplotlib.cm import rainbow_r
cmap = rainbow_r
data = np.asarray(data)
if xdata is None:
xdata = np.arange(data.shape[1])
# Calculate band depth if required.
if depth is None:
if method not in ['MBD', 'BD2']:
raise ValueError("Unknown value for parameter `method`.")
depth = banddepth(data, method=method)
else:
if depth.size != data.shape[0]:
raise ValueError("Provided `depth` array is not of correct size.")
ix_depth = np.argsort(depth)[::-1]
# Plot all curves, colored by depth
num_curves = data.shape[0]
for ii in range(num_curves):
ax.plot(xdata, data[ix_depth[ii], :], c=cmap(ii / (num_curves - 1.)))
# Plot the median curve
median_curve = data[ix_depth[0], :]
ax.plot(xdata, median_curve, 'k-', lw=2)
return fig
def banddepth(data, method='MBD'):
"""Calculate the band depth for a set of functional curves.
Band depth is an order statistic for functional data (see `fboxplot`), with
    a higher band depth indicating larger "centrality". In analogy to scalar
data, the functional curve with highest band depth is called the median
curve, and the band made up from the first N/2 of N curves is the 50%
central region.
Parameters
----------
data : ndarray
The vectors of functions to create a functional boxplot from.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
method : {'MBD', 'BD2'}, optional
Whether to use the original band depth (with J=2) of [1]_ or the
modified band depth. See Notes for details.
Returns
-------
depth : ndarray
Depth values for functional curves.
Notes
-----
Functional band depth as an order statistic for functional data was
proposed in [1]_ and applied to functional boxplots and bagplots in [2]_.
The method 'BD2' checks for each curve whether it lies completely inside
bands constructed from two curves. All permutations of two curves in the
set of curves are used, and the band depth is normalized to one. Due to
the complete curve having to fall within the band, this method yields a lot
of ties.
The method 'MBD' is similar to 'BD2', but checks the fraction of the curve
falling within the bands. It therefore generates very few ties.
References
----------
.. [1] S. Lopez-Pintado and J. Romo, "On the Concept of Depth for
Functional Data", Journal of the American Statistical Association,
vol. 104, pp. 718-734, 2009.
.. [2] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of
Computational and Graphical Statistics, vol. 20, pp. 1-19, 2011.
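    Examples
    --------
    A minimal sketch on synthetic curves (the random data below is illustrative
    only and not part of the statsmodels API):
    >>> import numpy as np
    >>> curves = np.random.randn(20, 50).cumsum(axis=1)
    >>> depth = banddepth(curves, method='MBD')
    >>> depth.shape
    (20,)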
"""
def _band2(x1, x2, curve):
xb = np.vstack([x1, x2])
if np.any(curve < xb.min(axis=0)) or np.any(curve > xb.max(axis=0)):
res = 0
else:
res = 1
return res
def _band_mod(x1, x2, curve):
xb = np.vstack([x1, x2])
res = np.logical_and(curve >= xb.min(axis=0),
curve <= xb.max(axis=0))
return np.sum(res) / float(res.size)
if method == 'BD2':
band = _band2
elif method == 'MBD':
band = _band_mod
else:
raise ValueError("Unknown input value for parameter `method`.")
num = data.shape[0]
ix = np.arange(num)
depth = []
for ii in range(num):
res = 0
for ix1, ix2 in combinations(ix, 2):
res += band(data[ix1, :], data[ix2, :], data[ii, :])
# Normalize by number of combinations to get band depth
normfactor = factorial(num) / 2. / factorial(num - 2)
depth.append(float(res) / normfactor)
return np.asarray(depth)
| bsd-3-clause |
necozay/tulip-control | tulip/transys/export/graph2dot.py | 1 | 17106 | # Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Convert labeled graph to dot using
pydot and custom filtering
"""
from __future__ import division
import logging
import re
from collections import Iterable
from textwrap import fill
from cStringIO import StringIO
import numpy as np
import networkx as nx
from networkx.utils import make_str
import pydot
# inline:
#
# import webcolors
logger = logging.getLogger(__name__)
def _states2dot_str(graph, to_pydot_graph, wrap=10,
tikz=False, rankdir='TB'):
"""Copy nodes to given Pydot graph, with attributes for dot export."""
# TODO generate LaTeX legend table for edge labels
states = graph.states
# get labeling def
if hasattr(graph, '_state_label_def'):
label_def = graph._state_label_def
if hasattr(graph, '_state_dot_label_format'):
label_format = graph._state_dot_label_format
else:
label_format = {'type?label': '', 'separator': '\n'}
for u, d in graph.nodes_iter(data=True):
# initial state ?
is_initial = u in states.initial
is_accepting = _is_accepting(graph, u)
# state annotation
node_dot_label = _form_node_label(
u, d, label_def,
label_format, wrap, tikz=tikz
)
# node_dot_label = fill(str(state), width=wrap)
rim_color = d.get('color', 'black')
if tikz:
_state2tikz(graph, to_pydot_graph, u,
is_initial, is_accepting, rankdir,
rim_color, d, node_dot_label)
else:
_state2dot(graph, to_pydot_graph, u,
is_initial, is_accepting,
rim_color, d, node_dot_label)
def _state2dot(graph, to_pydot_graph, state,
is_initial, is_accepting,
rim_color, d, node_dot_label):
if is_initial:
_add_incoming_edge(to_pydot_graph, state)
normal_shape = graph.dot_node_shape['normal']
accept_shape = graph.dot_node_shape.get('accepting', '')
shape = accept_shape if is_accepting else normal_shape
    corners = 'rounded' if shape == 'rectangle' else ''
rim_color = '"' + _format_color(rim_color, 'dot') + '"'
fc = d.get('fillcolor', 'none')
    filled = '' if fc == 'none' else 'filled'
    if fc == 'gradient':
# top/bottom colors not supported for dot
lc = d.get('left_color', d['top_color'])
rc = d.get('right_color', d['bottom_color'])
if isinstance(lc, basestring):
fillcolor = lc
elif isinstance(lc, dict):
fillcolor = lc.keys()[0]
else:
raise TypeError('left_color must be str or dict.')
if isinstance(rc, basestring):
fillcolor += ':' + rc
elif isinstance(rc, dict):
fillcolor += ':' + rc.keys()[0]
else:
raise TypeError('right_color must be str or dict.')
else:
fillcolor = _format_color(fc, 'dot')
if corners and filled:
node_style = '"' + corners + ', ' + filled + '"'
elif corners:
node_style = '"' + corners + '"'
else:
node_style = '"' + filled + '"'
to_pydot_graph.add_node(
state,
label=node_dot_label,
shape=shape,
style=node_style,
color=rim_color,
fillcolor='"' + fillcolor + '"')
def _state2tikz(graph, to_pydot_graph, state,
is_initial, is_accepting, rankdir,
rim_color, d, node_dot_label):
style = 'state'
    if rankdir == 'LR':
        init_dir = 'initial left'
    elif rankdir == 'RL':
        init_dir = 'initial right'
    elif rankdir == 'TB':
        init_dir = 'initial above'
    elif rankdir == 'BT':
        init_dir = 'initial below'
else:
raise ValueError('Unknown rankdir')
if is_initial:
style += ', initial by arrow, ' + init_dir + ', initial text='
if is_accepting:
style += ', accepting'
    if graph.dot_node_shape['normal'] == 'rectangle':
style += ', shape = rectangle, rounded corners'
# darken the rim
if 'black' in rim_color:
c = _format_color(rim_color, 'tikz')
else:
c = _format_color(rim_color, 'tikz') + '!black!30'
style += ', draw = ' + c
fill = d.get('fillcolor')
    if fill == 'gradient':
s = {'top_color', 'bottom_color',
'left_color', 'right_color'}
for x in s:
if x in d:
style += ', ' + x + ' = ' + _format_color(d[x], 'tikz')
elif fill is not None:
# not gradient
style += ', fill = ' + _format_color(fill, 'tikz')
else:
logger.debug('fillcolor is None')
to_pydot_graph.add_node(
state,
texlbl=node_dot_label,
style=style)
def _format_color(color, prog='tikz'):
"""Encode color in syntax for given program.
@type color:
- C{str} for single color or
- C{dict} for weighted color mix
@type prog: 'tikz' or 'dot'
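    Example (illustrative sketch; single-color inputs shown so the output is
    deterministic)::
        _format_color('blue', 'dot')        # -> 'blue'
        _format_color({'red': 30}, 'tikz')  # -> 'red!30'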
"""
if isinstance(color, basestring):
return color
if not isinstance(color, dict):
raise Exception('color must be str or dict')
    if prog == 'tikz':
        s = '!'.join([k + '!' + str(v) for k, v in color.iteritems()])
    elif prog == 'dot':
t = sum(color.itervalues())
try:
import webcolors
# mix them
result = np.array((0.0, 0.0, 0.0))
for c, w in color.iteritems():
result += w/t * np.array(webcolors.name_to_rgb(c))
s = webcolors.rgb_to_hex(result)
except:
logger.warn('failed to import webcolors')
s = ':'.join([k + ';' + str(v/t) for k, v in color.iteritems()])
else:
raise ValueError('Unknown program: ' + str(prog) + '. '
"Available options are: 'dot' or 'tikz'.")
return s
def _place_initial_states(trs_graph, pd_graph, tikz):
init_subg = pydot.Subgraph('initial')
init_subg.set_rank('source')
for node in trs_graph.states.initial:
pd_node = pydot.Node(make_str(node))
init_subg.add_node(pd_node)
phantom_node = 'phantominit' + str(node)
pd_node = pydot.Node(make_str(phantom_node))
init_subg.add_node(pd_node)
pd_graph.add_subgraph(init_subg)
def _add_incoming_edge(g, state):
phantom_node = 'phantominit' + str(state)
g.add_node(phantom_node, label='""', shape='none', width='0')
g.add_edge(phantom_node, state)
def _form_node_label(state, state_data, label_def,
label_format, width=10, tikz=False):
# node itself
state_str = str(state)
state_str = state_str.replace("'", "")
# rm parentheses to reduce size of states in fig
if tikz:
state_str = state_str.replace('(', '')
state_str = state_str.replace(')', '')
# make indices subscripts
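    # e.g. a node named 's1' is rewritten to 's_1', which dot2tex typesets with
    # the digits as a subscript (the node name here is only illustrative)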
if tikz:
pattern = '([a-zA-Z]\d+)'
make_subscript = lambda x: x.group(0)[0] + '_' + x.group(0)[1:]
state_str = re.sub(pattern, make_subscript, state_str)
# SVG requires breaking the math environment into
# one math env per line. Just make 1st line math env
# if latex:
# state_str = '$' + state_str + '$'
# state_str = fill(state_str, width=width)
node_dot_label = state_str
# newline between state name and label, only if state is labeled
if len(state_data) != 0:
node_dot_label += r'\n'
# add node annotations from action, AP sets etc
# other key,values in state attr_dict ignored
pieces = list()
for (label_type, label_value) in state_data.iteritems():
if label_type not in label_def:
continue
# label formatting
type_name = label_format[label_type]
sep_type_value = label_format['type?label']
# avoid turning strings to lists,
# or non-iterables to lists
if isinstance(label_value, str):
label_str = fill(label_value, width=width)
elif isinstance(label_value, Iterable): # and not str
s = ', '.join([str(x) for x in label_value])
label_str = r'\\{' + fill(s, width=width) + r'\\}'
else:
label_str = fill(str(label_value), width=width)
pieces.append(type_name + sep_type_value + label_str)
sep_label_sets = label_format['separator']
node_dot_label += sep_label_sets.join(pieces)
if tikz:
# replace LF by latex newline
node_dot_label = node_dot_label.replace(r'\n', r'\\\\ ')
# dot2tex math mode doesn't handle newlines properly
node_dot_label = (
r'$\\begin{matrix} ' + node_dot_label +
r'\\end{matrix}$'
)
return node_dot_label
def _is_accepting(graph, state):
"""accepting state ?"""
# no accepting states defined ?
if not hasattr(graph.states, 'accepting'):
return False
return state in graph.states.accepting
def _transitions2dot_str(trans, to_pydot_graph, tikz=False):
"""Convert transitions to dot str.
@rtype: str
"""
if not hasattr(trans.graph, '_transition_label_def'):
return
if not hasattr(trans.graph, '_transition_dot_label_format'):
return
if not hasattr(trans.graph, '_transition_dot_mask'):
return
# get labeling def
label_def = trans.graph._transition_label_def
label_format = trans.graph._transition_dot_label_format
label_mask = trans.graph._transition_dot_mask
for (u, v, key, edge_data) in trans.graph.edges_iter(
data=True, keys=True
):
edge_dot_label = _form_edge_label(
edge_data, label_def,
label_format, label_mask, tikz
)
edge_color = edge_data.get('color', 'black')
to_pydot_graph.add_edge(u, v, key=key,
label=edge_dot_label,
color=edge_color)
def _form_edge_label(edge_data, label_def,
label_format, label_mask, tikz):
label = '' # dot label for edge
sep_label_sets = label_format['separator']
for label_type, label_value in edge_data.iteritems():
if label_type not in label_def:
continue
# masking defined ?
# custom filter hiding based on value
if label_type in label_mask:
# not show ?
if not label_mask[label_type](label_value):
continue
# label formatting
if label_type in label_format:
type_name = label_format[label_type]
sep_type_value = label_format['type?label']
else:
type_name = ':'
sep_type_value = r',\n'
# format iterable containers using
# mathematical set notation: {...}
if isinstance(label_value, basestring):
# str is Iterable: avoid turning it to list
label_str = label_value
elif isinstance(label_value, Iterable):
s = ', '.join([str(x) for x in label_value])
label_str = r'\\{' + fill(s) + r'\\}'
else:
label_str = str(label_value)
if tikz:
type_name = r'\mathrm' + '{' + type_name + '}'
label += (type_name + sep_type_value +
label_str + sep_label_sets)
if tikz:
label = r'\\begin{matrix}' + label + r'\\end{matrix}'
label = '"' + label + '"'
return label
def _graph2pydot(graph, wrap=10, tikz=False,
rankdir='TB'):
"""Convert (possibly labeled) state graph to dot str.
@type graph: L{LabeledDiGraph}
@rtype: str
"""
dummy_nx_graph = nx.MultiDiGraph()
_states2dot_str(graph, dummy_nx_graph, wrap=wrap, tikz=tikz,
rankdir=rankdir)
_transitions2dot_str(graph.transitions, dummy_nx_graph, tikz=tikz)
pydot_graph = nx.drawing.nx_pydot.to_pydot(dummy_nx_graph)
_place_initial_states(graph, pydot_graph, tikz)
pydot_graph.set_overlap('false')
# pydot_graph.set_size('"0.25,1"')
# pydot_graph.set_ratio('"compress"')
pydot_graph.set_nodesep(0.5)
pydot_graph.set_ranksep(0.1)
return pydot_graph
def graph2dot_str(graph, wrap=10, tikz=False):
"""Convert graph to dot string.
Requires pydot.
@type graph: L{LabeledDiGraph}
@param wrap: textwrap width
@rtype: str
"""
pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz)
return pydot_graph.to_string()
def save_dot(graph, path, fileformat, rankdir, prog, wrap, tikz=False):
"""Save state graph to dot file.
@type graph: L{LabeledDiGraph}
@return: True upon success
@rtype: bool
"""
pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz,
rankdir=rankdir)
if pydot_graph is None:
# graph2dot must have printed warning already
return False
pydot_graph.set_rankdir(rankdir)
pydot_graph.set_splines('true')
# turn off graphviz warnings caused by tikz labels
if tikz:
prog = [prog, '-q 1']
pydot_graph.write(path, format=fileformat, prog=prog)
return True
def plot_pydot(graph, prog='dot', rankdir='LR', wrap=10, ax=None):
"""Plot a networkx or pydot graph using dot.
No files written or deleted from the disk.
Note that all networkx graph classes are inherited
from networkx.Graph
See Also
========
dot & pydot documentation
@param graph: to plot
@type graph: networkx.Graph | pydot.Graph
    @param prog: GraphViz program to use
@type prog: 'dot' | 'neato' | 'circo' | 'twopi'
| 'fdp' | 'sfdp' | etc
@param rankdir: direction to layout nodes
@type rankdir: 'LR' | 'TB'
@param ax: axes
"""
try:
pydot_graph = _graph2pydot(graph, wrap=wrap)
except:
if isinstance(graph, nx.Graph):
pydot_graph = nx.drawing.nx_pydot.to_pydot(graph)
else:
raise TypeError(
'graph not networkx or pydot class.' +
'Got instead: ' + str(type(graph)))
pydot_graph.set_rankdir(rankdir)
pydot_graph.set_splines('true')
pydot_graph.set_bgcolor('gray')
png_str = pydot_graph.create_png(prog=prog)
# installed ?
try:
from IPython.display import display, Image
logger.debug('IPython installed.')
# called by IPython ?
try:
cfg = get_ipython().config
logger.debug('Script called by IPython.')
# Caution!!! : not ordinary dict,
# but IPython.config.loader.Config
# qtconsole ?
if cfg['IPKernelApp']:
logger.debug('Within IPython QtConsole.')
display(Image(data=png_str))
return True
except:
print('IPython installed, but not called from it.')
except ImportError:
logger.warn('IPython not found.\nSo loaded dot images not inline.')
# not called from IPython QtConsole, try Matplotlib...
# installed ?
try:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
except:
logger.debug('Matplotlib not installed.')
logger.warn('Neither IPython QtConsole nor Matplotlib available.')
return None
logger.debug('Matplotlib installed.')
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
sio = StringIO()
sio.write(png_str)
sio.seek(0)
img = mpimg.imread(sio)
ax.imshow(img, aspect='equal')
plt.show(block=False)
return ax
| bsd-3-clause |
jakobworldpeace/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
chungjjang80/FRETBursts | fretbursts/utils/examples/matplotlib_figure_mod_toolbar.py | 2 | 1276 | """
Example on how to add widgets the toolbar of a Matplotlib figure using the
QT backend.
No QT application is created, only the toolbar of the native MPL figure is
modified.
"""
from PySide import QtGui, QtCore
import matplotlib
from matplotlib.pyplot import plot, gcf  # needed by test(); the original assumed a pylab session
def test():
plot([1,2,3], lw=2)
q = qt4_interface(gcf())
return q # WARNING: it's paramount to return the object otherwise, with
# no references, python deletes it and the GUI doesn't respond!
class qt4_interface:
def __init__(self,fig):
self.fig = fig
toolbar = fig.canvas.toolbar
self.line_edit = QtGui.QLineEdit()
toolbar.addWidget(self.line_edit)
self.line_edit.editingFinished.connect(self.do_something)
self.spinbox = QtGui.QDoubleSpinBox()
toolbar.addWidget(self.spinbox)
self.spinbox.valueChanged.connect(self.do_something2)
def do_something(self, *args):
self.fig.axes[0].set_title(self.line_edit.text())
self.fig.canvas.draw()
#f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
def do_something2(self, *args):
self.fig.axes[0].set_xlim(0, self.spinbox.value())
self.fig.canvas.draw()
#f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
| gpl-2.0 |
astroclark/bhextractor | bin/bhex_scalemassdemo.py | 1 | 4019 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 James Clark <james.clark@ligo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
bhex_scalemassdemo.py
Build a waveform catalogue and PCA at one reference total mass, reconstruct the
waveform at a different mass, and compare matches against a directly generated
catalogue using pycbc.
"""
import numpy as np
from matplotlib import pyplot as pl
import bhextractor_pca as bhex
import pycbc.types
import pycbc.filter
from pycbc.psd import aLIGOZeroDetHighPower
# -------------------------------
# USER INPUT
catalogue_name='Q'
theta=90.0
# END USER INPUT
# -------------------------------
# -------------------------------
# ANALYSIS
catlen=4
#
# Setup and then build the catalogue
#
catalogue = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
catalogue_len=catlen, mtotal_ref=250, Dist=1., theta=theta)
oriwave250 = np.copy(catalogue.aligned_catalogue[0,:])
#
# Do the PCA
#
pca = bhex.waveform_pca(catalogue)
#
# Build a 350 solar mass waveform from the 250 Msun PCs
# Just use the first waveform
betas = pca.projection_plus[catalogue.waveform_names[0]]
times = np.arange(0,len(catalogue.aligned_catalogue[0,:])/2048.,1./2048)
recwave350 = bhex.reconstruct_waveform(pca.pca_plus, betas, len(catalogue.waveform_names),
mtotal_target=350.0)
#
# Now make a catalogue at 350 solar masses and then compute the overlap
#
catalogue350 = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
catalogue_len=catlen, mtotal_ref=350, Dist=1., theta=theta)
oriwave350 = np.copy(catalogue350.aligned_catalogue[0,:])
# Finally, compute the match between the reconstructed 350 Msun system and the
# system we generated at that mass in the first place
recwave350_pycbc = pycbc.types.TimeSeries(np.real(recwave350), delta_t=1./2048)
oriwave250_pycbc = pycbc.types.TimeSeries(np.real(oriwave250), delta_t=1./2048)
oriwave350_pycbc = pycbc.types.TimeSeries(np.real(oriwave350), delta_t=1./2048)
psd = aLIGOZeroDetHighPower(len(recwave350_pycbc.to_frequencyseries()),
recwave350_pycbc.to_frequencyseries().delta_f, low_freq_cutoff=10.0)
match_cat = pycbc.filter.match(oriwave250_pycbc.to_frequencyseries(),
oriwave350_pycbc.to_frequencyseries(), psd=psd,
low_frequency_cutoff=10)[0]
match_rec = pycbc.filter.match(recwave350_pycbc.to_frequencyseries(),
oriwave350_pycbc.to_frequencyseries(), psd=psd,
low_frequency_cutoff=10)[0]
print 'Match between 250 and 350 Msun catalogue waves: ', match_cat
print 'Match between 350 reconstruction and 350 catalogue wave: ', match_rec
#
# Make plots
#
if 1:
print "Plotting reconstructions"
fig, ax = pl.subplots(nrows=2,ncols=1)
ax[0].plot(times,np.real(oriwave250), 'b', label='250 M$_{\odot}$ catalogue')
ax[0].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
ax[0].set_xlim(0,2.5)
ax[0].set_title('Match = %f'% match_cat)
ax[0].legend(loc='upper left',prop={'size':10})
ax[1].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
ax[1].plot(times,np.real(recwave350), 'r', label='350 M$_{\odot}$ reconstruction')
ax[1].set_xlim(0,2.5)
ax[1].set_xlabel('Time (s)')
ax[1].set_title('Match = %f'% match_rec)
ax[1].legend(loc='upper left',prop={'size':10})
fig.tight_layout()
fig.savefig('scalemassdemo.png')
| gpl-2.0 |
justrypython/EAST | svm_model_v2.py | 1 | 2801 | #encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle
#calculate the area
def area(p):
p = p.reshape((-1, 2))
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
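# Shoelace-formula sanity check (illustrative addition, not from the original
# script): the unit square [0, 0, 1, 0, 1, 1, 0, 1] has area 1.0, i.e.
# area(np.array([0, 0, 1, 0, 1, 1, 0, 1])) == 1.0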
def segments(p):
return zip(p, np.concatenate((p[1:], [p[0]])))
def calc_xy(p0, p1, p2):
cos = calc_cos(p0, p1, p2)
dis = calc_dis(p0, p2)
return dis * cos, dis * np.sqrt(1 - np.square(cos))
def calc_dis(p0, p1):
return np.sqrt(np.sum(np.square(p0-p1)))
def calc_cos(p0, p1, p2):
A = p1 - p0
B = p2 - p0
num = np.dot(A, B)
demon = np.linalg.norm(A) * np.linalg.norm(B)
return num / demon
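# Note on the helpers above (comment added for clarity): calc_xy returns the
# components of p0->p2 parallel and perpendicular to p0->p1, so calc_new_xy
# below expresses box0's first corner in a frame aligned with one edge of the
# class-7 box (``idboxes``), normalised by that edge's length.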
def calc_new_xy(boxes):
box0 = boxes[:8]
box1 = boxes[8:]
x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
dis = calc_dis(box1[4:6], box1[6:])
area0 = area(box0)
area1 = area(box1)
return x/dis, y/dis
if __name__ == '__main__':
test = True
path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
paths = os.listdir(path)
paths = [i for i in paths if '.txt' in i]
boxes = np.empty((800000, 9))
cnt = 0
for txt in paths:
f = open(path+txt, 'r')
lines = f.readlines()
f.close()
lines = [i.replace('\n', '').split(',') for i in lines]
lines = np.array(lines).astype(np.uint32)
boxes[cnt*10:cnt*10+len(lines)] = lines
cnt += 1
zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
zeros_labels = zeros.all(axis=1)
zeros_labels = np.where(zeros_labels==True)
idboxes = boxes[boxes[:, 8]==7]
idboxes = np.tile(idboxes[:, :8], (1, 10))
idboxes = idboxes.reshape((-1, 8))
boxes = np.delete(boxes, zeros_labels[0], axis=0)
idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
start_time = datetime.datetime.now()
print start_time
new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
end_time = datetime.datetime.now()
print end_time - start_time
if test:
with open('clf_address_v2.pickle', 'rb') as f:
clf = pickle.load(f)
cnt = 0
for i, xy in enumerate(new_xy):
cls = int(clf.predict([xy])[0])
if cls == int(boxes[i, 8]):
cnt += 1
if i % 10000 == 0 and i != 0:
print i, ':', float(cnt) / i
else:
clf = SVC()
start_time = datetime.datetime.now()
print start_time
clf.fit(new_xy[:], boxes[:, 8])
end_time = datetime.datetime.now()
print end_time - start_time
with open('clf.pickle', 'wb') as f:
pickle.dump(clf, f)
print 'end' | gpl-3.0 |
mojoboss/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
michaelld/gnuradio | gnuradio-runtime/apps/evaluation_random_numbers.py | 7 | 5284 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print('All histograms contain',num_tests,'realisations.')
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests / float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
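# The expected count in each bin is num_tests * (F(upper_edge) - F(lower_edge)),
# with F the CDF of the target distribution; e.g. a standard-normal bin spanning
# [0, 1] should collect roughly 0.34 * num_tests realisations.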
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0]) / 2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0]) / 2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0]) / 2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0]) / 2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0] / uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0] / gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0] / rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0] / laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
| gpl-3.0 |
jzt5132/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
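# A hedged follow-up sketch (not part of the original example): as the docstring
# notes, a purer class-B subset can be built by keeping only samples whose
# decision score exceeds a chosen cut-off (0.2 is an arbitrary illustration).
purity_cut = 0.2
X_purer_B = X[twoclass_output > purity_cut]
y_purer_B = y[twoclass_output > purity_cut]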
| bsd-3-clause |
iamgp/pyCa | pyCa/Graph.py | 1 | 2559 | from . import *
# Graphics Stuff
import matplotlib.pyplot as plt
class Graph(object):
"""docstring for Graph"""
def __init__(self, Experiment):
self.Experiment = Experiment
self.numberOfStimulantsAdded = 0
self.nameToUse = 0
def plot(self):
print ''
log(self.Experiment.name, colour="yellow")
log('==================', colour="yellow")
for i, col in self.Experiment.data.iteritems():
if i == 0:
col.name = "time"
if col.name == "time":
continue
fig, ax = plt.subplots(1)
plt.plot(self.Experiment.data.time, col, '-')
plt.title(col.name)
ax.set_ylim(
col.min() - (0.1 * col.min()), col.max() + (0.1 * col.max()))
self.nameToUse = 0
print ''
log(col.name, colour="red")
log('--------------------------------------', colour="red")
def onclick(event):
if self.numberOfStimulantsAdded == 0:
x1 = event.xdata
y1 = event.ydata
log(' > 1st point, adding x1:{} y1:{} to {}'.format(
x1, y1, self.Experiment.names[self.nameToUse]),
colour="black")
self.Experiment.currentCell.addFirstPoint(x1, y1)
self.numberOfStimulantsAdded = 1
elif self.numberOfStimulantsAdded == 1:
x2 = event.xdata
y2 = event.ydata
log(' > 2nd point, adding x2:{} y2:{} to {}'.format(
x2, y2, self.Experiment.names[self.nameToUse]),
colour="black")
self.Experiment.currentCell.addSecondPointWithName(
x2, y2, self.Experiment.names[self.nameToUse])
self.numberOfStimulantsAdded = 0
self.nameToUse = self.nameToUse + 1
fig.canvas.mpl_connect('button_press_event', onclick)
for t in self.Experiment.times:
plt.axvspan(t, t + 5, color='red', alpha=0.1)
plt.show()
self.Experiment.currentCell.cellname = col.name
self.Experiment.cells.append(self.Experiment.currentCell)
if self.Experiment.currentCell.describe() is not None:
log(self.Experiment.currentCell.describe(),
colour="black")
self.Experiment.currentCell = Cell()
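# Hedged usage sketch (added for illustration; ``my_experiment`` is assumed to
# be an Experiment object exposing .data, .names, .times and .cells as used
# above):
# g = Graph(my_experiment)
# g.plot()  # for each trace, click the two points of every stimulant response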
| gpl-3.0 |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_maths.py | 1 | 20433 | #!/usr/bin/env python
#########################################################################################
#
# Perform mathematical operations on images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import os
import sys
import pickle
import gzip
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import spinalcordtoolbox.math as sct_math
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
def get_parser():
parser = SCTArgumentParser(
description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '
'several 3d images separated with ","'
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help="Input file. Example: data.nii.gz",
required=True)
mandatory.add_argument(
"-o",
metavar=Metavar.file,
help='Output file. Example: data_mean.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
basic = parser.add_argument_group('BASIC OPERATIONS')
basic.add_argument(
"-add",
metavar='',
nargs="+",
help='Add following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-sub",
metavar='',
nargs="+",
help='Subtract following input. Can be a number or an image.',
required=False)
basic.add_argument(
"-mul",
metavar='',
nargs="+",
help='Multiply by following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-div",
metavar='',
nargs="+",
help='Divide by following input. Can be a number or an image.',
required=False)
basic.add_argument(
'-mean',
help='Average data across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-rms',
help='Compute root-mean-squared across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-std',
help='Compute STD across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
"-bin",
type=float,
metavar=Metavar.float,
help='Binarize image using specified threshold. Example: 0.5',
required=False)
thresholding = parser.add_argument_group("THRESHOLDING METHODS")
thresholding.add_argument(
'-otsu',
type=int,
metavar=Metavar.int,
help='Threshold image using Otsu algorithm (from skimage). Specify the number of bins (e.g. 16, 64, 128)',
required=False)
thresholding.add_argument(
"-adap",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Adaptive algorithm (from skimage). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-adap 7,0' corresponds to a block size of 7 and an "
"offset of 0.\n"
" - Block size: Odd size of pixel neighborhood which is used to calculate the threshold value. \n"
" - Offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold "
"value. Suggested offset is 0.",
required=False)
thresholding.add_argument(
"-otsu-median",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Median Otsu algorithm (from dipy). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-otsu-median 3,5' corresponds to a filter size of 3 "
"repeated over 5 iterations.\n"
" - Size: Radius (in voxels) of the applied median filter.\n"
" - Iterations: Number of passes of the median filter.",
required=False)
thresholding.add_argument(
'-percent',
type=int,
help="Threshold image using percentile of its histogram.",
metavar=Metavar.int,
required=False)
thresholding.add_argument(
"-thr",
type=float,
help='Use following number to threshold image (zero below number).',
metavar=Metavar.float,
required=False)
mathematical = parser.add_argument_group("MATHEMATICAL MORPHOLOGY")
mathematical.add_argument(
'-dilate',
type=int,
metavar=Metavar.int,
help="Dilate binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-erode',
type=int,
metavar=Metavar.int,
help="Erode binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-shape',
help="R|Shape of the structuring element for the mathematical morphology operation. Default: ball.\n"
"If a 2D shape {'disk', 'square'} is selected, -dim must be specified.",
required=False,
choices=('square', 'cube', 'disk', 'ball'),
default='ball')
mathematical.add_argument(
'-dim',
type=int,
help="Dimension of the array which 2D structural element will be orthogonal to. For example, if you wish to "
"apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.",
required=False,
choices=(0, 1, 2))
filtering = parser.add_argument_group("FILTERING METHODS")
filtering.add_argument(
"-smooth",
metavar=Metavar.list,
type=list_type(',', float),
help='Gaussian smoothing filtering. Supply values for standard deviations in mm. If a single value is provided, '
'it will be applied to each axis of the image. If multiple values are provided, there must be one value '
'per image axis. (Examples: "-smooth 2.0,3.0,2.0" (3D image), "-smooth 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-laplacian',
metavar=Metavar.list,
type=list_type(',', float),
help='Laplacian filtering. Supply values for standard deviations in mm. If a single value is provided, it will '
'be applied to each axis of the image. If multiple values are provided, there must be one value per '
'image axis. (Examples: "-laplacian 2.0,3.0,2.0" (3D image), "-laplacian 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-denoise',
        help='R|Non-local means adaptive denoising from P. Coupe et al. as implemented in dipy. Separate parameters with ",". Example: p=1,b=3\n'
' p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\n'
' b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5 '
' Note, block radius must be smaller than the smaller image dimension: default value is lowered for small images)\n'
'To use default parameters, write -denoise 1',
required=False)
similarity = parser.add_argument_group("SIMILARITY METRIC")
similarity.add_argument(
'-mi',
metavar=Metavar.file,
help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',
required=False)
similarity.add_argument(
'-minorm',
metavar=Metavar.file,
help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',
required=False)
similarity.add_argument(
'-corr',
metavar=Metavar.file,
help='Compute the cross correlation (CC) between both input files (-i and -cc).',
required=False)
misc = parser.add_argument_group("MISC")
misc.add_argument(
'-symmetrize',
type=int,
help='Symmetrize data along the specified dimension.',
required=False,
choices=(0, 1, 2))
misc.add_argument(
'-type',
required=False,
help='Output type.',
choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',
'uint64'))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
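# Example invocation (illustrative): sct_maths -i data.nii.gz -o data_mean.nii.gz -mean t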
# MAIN
# ==========================================================================================
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
dim_list = ['x', 'y', 'z', 't']
fname_in = arguments.i
fname_out = arguments.o
output_type = arguments.type
# Open file(s)
im = Image(fname_in)
data = im.data # 3d or 4d numpy array
dim = im.dim
# run command
if arguments.otsu is not None:
param = arguments.otsu
data_out = sct_math.otsu(data, param)
elif arguments.adap is not None:
param = arguments.adap
data_out = sct_math.adap(data, param[0], param[1])
elif arguments.otsu_median is not None:
param = arguments.otsu_median
data_out = sct_math.otsu_median(data, param[0], param[1])
elif arguments.thr is not None:
param = arguments.thr
data_out = sct_math.threshold(data, param)
elif arguments.percent is not None:
param = arguments.percent
data_out = sct_math.perc(data, param)
elif arguments.bin is not None:
bin_thr = arguments.bin
data_out = sct_math.binarize(data, bin_thr=bin_thr)
elif arguments.add is not None:
data2 = get_data_or_scalar(arguments.add, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.sum(data_concat, axis=3)
elif arguments.sub is not None:
data2 = get_data_or_scalar(arguments.sub, data)
data_out = data - data2
elif arguments.laplacian is not None:
sigmas = arguments.laplacian
if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -laplacian needs either a single input or one input per image dimension'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.laplacian(data, sigmas)
elif arguments.mul is not None:
data2 = get_data_or_scalar(arguments.mul, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.prod(data_concat, axis=3)
elif arguments.div is not None:
data2 = get_data_or_scalar(arguments.div, data)
data_out = np.divide(data, data2)
elif arguments.mean is not None:
dim = dim_list.index(arguments.mean)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.mean(data, dim)
elif arguments.rms is not None:
dim = dim_list.index(arguments.rms)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))
elif arguments.std is not None:
dim = dim_list.index(arguments.std)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.std(data, dim, ddof=1)
elif arguments.smooth is not None:
sigmas = arguments.smooth
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -smooth needs either a single input or one input per image dimension'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.smooth(data, sigmas)
elif arguments.dilate is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel'))
data_out = sct_math.dilate(data, size=arguments.dilate, shape=arguments.shape, dim=arguments.dim)
elif arguments.erode is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -erode with 2D morphological kernel'))
data_out = sct_math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim)
elif arguments.denoise is not None:
# parse denoising arguments
p, b = 1, 5 # default arguments
list_denoise = (arguments.denoise).split(",")
for i in list_denoise:
if 'p' in i:
p = int(i.split('=')[1])
if 'b' in i:
b = int(i.split('=')[1])
data_out = sct_math.denoise_nlmeans(data, patch_radius=p, block_radius=b)
elif arguments.symmetrize is not None:
        # Average the data with its mirror image along the requested dimension
        data_out = (data + np.flip(data, axis=arguments.symmetrize)) / 2.0
elif arguments.mi is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.mi)
compute_similarity(im, im_2, fname_out, metric='mi', metric_full='Mutual information', verbose=verbose)
data_out = None
elif arguments.minorm is not None:
im_2 = Image(arguments.minorm)
compute_similarity(im, im_2, fname_out, metric='minorm', metric_full='Normalized Mutual information', verbose=verbose)
data_out = None
elif arguments.corr is not None:
# input 1 = from flag -i --> im
        # input 2 = from flag -corr
im_2 = Image(arguments.corr)
compute_similarity(im, im_2, fname_out, metric='corr', metric_full='Pearson correlation coefficient', verbose=verbose)
data_out = None
# if no flag is set
else:
data_out = None
printv(parser.error('ERROR: you need to specify an operation to do on the input image'))
if data_out is not None:
# Write output
nii_out = Image(fname_in) # use header of input file
nii_out.data = data_out
nii_out.save(fname_out, dtype=output_type)
# TODO: case of multiple outputs
# assert len(data_out) == n_out
# if n_in == n_out:
# for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
# im_in.data = d_out
# im_in.absolutepath = fn_out
# if arguments.w is not None:
# im_in.hdr.set_intent('vector', (), '')
# im_in.save()
# elif n_out == 1:
# nii[0].data = data_out[0]
# nii[0].absolutepath = fname_out[0]
# if arguments.w is not None:
# nii[0].hdr.set_intent('vector', (), '')
# nii[0].save()
# elif n_out > n_in:
# for dat_out, name_out in zip(data_out, fname_out):
# im_out = nii[0].copy()
# im_out.data = dat_out
# im_out.absolutepath = name_out
# if arguments.w is not None:
# im_out.hdr.set_intent('vector', (), '')
# im_out.save()
# else:
# printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))
# display message
if data_out is not None:
display_viewer_syntax([fname_out], verbose=verbose)
else:
printv('\nDone! File created: ' + fname_out, verbose, 'info')
def get_data(list_fname):
"""
Get data from list of file names
:param list_fname:
:return: 3D or 4D numpy array.
"""
try:
nii = [Image(f_in) for f_in in list_fname]
except Exception as e:
printv(str(e), 1, 'error') # file does not exist, exit program
data0 = nii[0].data
data = nii[0].data
# check that every images have same shape
for i in range(1, len(nii)):
if not np.shape(nii[i].data) == np.shape(data0):
printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
printv('\nERROR: All input images must have same dimensions.', 1, 'error')
else:
data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
return data
def get_data_or_scalar(argument, data_in):
"""
Get data from list of file names (scenario 1) or scalar (scenario 2)
:param argument: list of file names of scalar
:param data_in: if argument is scalar, use data to get np.shape
:return: 3d or 4d numpy array
"""
# try to convert argument in float
try:
# build data2 with same shape as data
data_out = data_in[:, :, :] * 0 + float(argument[0])
# if conversion fails, it should be a string (i.e. file name)
except ValueError:
data_out = get_data(argument)
return data_out
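# Hedged illustration of the two scenarios handled by get_data_or_scalar(); the
# file name and the scalar value are hypothetical:
#   get_data_or_scalar(['2.5'], data)          -> array of 2.5s with the shape of `data`
#   get_data_or_scalar(['mask.nii.gz'], data)  -> data loaded from mask.nii.gz via get_data()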
def compute_similarity(img1: Image, img2: Image, fname_out: str, metric: str, metric_full: str, verbose):
"""
Sanitize input and compute similarity metric between two images data.
"""
if img1.data.size != img2.data.size:
raise ValueError(f"Input images don't have the same size! \nPlease use \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space")
res, data1_1d, data2_1d = sct_math.compute_similarity(img1.data, img2.data, metric=metric)
if verbose > 1:
matplotlib.use('Agg')
plt.plot(data1_1d, 'b')
plt.plot(data2_1d, 'r')
plt.title('Similarity: ' + metric_full + ' = ' + str(res))
plt.savefig('fig_similarity.png')
path_out, filename_out, ext_out = extract_fname(fname_out)
if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:
raise ValueError(f"The output file should a text file or a pickle file. Received extension: {ext_out}")
if ext_out == '.txt':
with open(fname_out, 'w') as f:
f.write(metric_full + ': \n' + str(res))
elif ext_out == '.pklz':
pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)
else:
        pickle.dump(res, open(fname_out, 'wb'), protocol=2)
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| mit |
rafaelmds/fatiando | gallery/gridder/cutting.py | 6 | 1326 | """
Cutting a section from spatial data
-----------------------------------
The :func:`fatiando.gridder.cut` function extracts points from spatially
distributed data that are inside a given area. It doesn't matter whether or
not the points are on a regular grid.
"""
from fatiando import gridder
import matplotlib.pyplot as plt
import numpy as np
# Generate some synthetic data
area = (-100, 100, -60, 60)
x, y = gridder.scatter(area, 1000, seed=0)
data = np.sin(0.1*x)*np.cos(0.1*y)
# Select the data that fall inside "section"
section = [-40, 40, -25, 25]
# Tip: you can pass more than one data array as input. Use this to cut multiple
# data sources at once (e.g., gravity + height + topography); see the commented
# sketch after the call below.
x_sub, y_sub, [data_sub] = gridder.cut(x, y, [data], section)
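# A sketch of cutting several arrays at once (the "height" array is hypothetical
# and only assumed to have the same length as x, y, and data):
#   x_sub, y_sub, [data_sub, height_sub] = gridder.cut(x, y, [data, height], section)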
# Plot the original data besides the cut section
plt.figure(figsize=(8, 6))
plt.subplot(1, 2, 1)
plt.axis('scaled')
plt.title("Whole data")
plt.tricontourf(y, x, data, 30, cmap='RdBu_r')
plt.plot(y, x, 'xk')
x1, x2, y1, y2 = section
plt.plot([y1, y2, y2, y1, y1], [x1, x1, x2, x2, x1], '-k', linewidth=3)
plt.xlim(area[2:])
plt.ylim(area[:2])
plt.subplot(1, 2, 2)
plt.axis('scaled')
plt.title("Subsection")
plt.plot(y_sub, x_sub, 'xk')
plt.tricontourf(y_sub, x_sub, data_sub, 30, cmap='RdBu_r')
plt.xlim(section[2:])
plt.ylim(section[:2])
plt.tight_layout()
plt.show()
| bsd-3-clause |
danielhkl/matplotlib2tikz | matplotlib2tikz/color.py | 1 | 2761 | # -*- coding: utf-8 -*-
#
import matplotlib as mpl
import numpy
def mpl_color2xcolor(data, matplotlib_color):
'''Translates a matplotlib color specification into a proper LaTeX xcolor.
'''
# Convert it to RGBA.
my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color))
# If the alpha channel is exactly 0, then the color is really 'none'
# regardless of the RGB channels.
if my_col[-1] == 0.0:
return data, 'none', my_col
xcol = None
# RGB values (as taken from xcolor.dtx):
available_colors = {
'red': numpy.array([1, 0, 0]),
'green': numpy.array([0, 1, 0]),
'blue': numpy.array([0, 0, 1]),
'brown': numpy.array([0.75, 0.5, 0.25]),
'lime': numpy.array([0.75, 1, 0]),
'orange': numpy.array([1, 0.5, 0]),
'pink': numpy.array([1, 0.75, 0.75]),
'purple': numpy.array([0.75, 0, 0.25]),
'teal': numpy.array([0, 0.5, 0.5]),
'violet': numpy.array([0.5, 0, 0.5]),
'black': numpy.array([0, 0, 0]),
'darkgray': numpy.array([0.25, 0.25, 0.25]),
'gray': numpy.array([0.5, 0.5, 0.5]),
'lightgray': numpy.array([0.75, 0.75, 0.75]),
'white': numpy.array([1, 1, 1])
# The colors cyan, magenta, yellow, and olive are also
# predefined by xcolor, but their RGB approximation of the
# native CMYK values is not very good. Don't use them here.
}
available_colors.update(data['custom colors'])
# Check if it exactly matches any of the colors already available.
# This case is actually treated below (alpha==1), but that loop
# may pick up combinations with black before finding the exact
# match. Hence, first check all colors.
for name, rgb in available_colors.items():
if all(my_col[:3] == rgb):
xcol = name
return data, xcol, my_col
# Check if my_col is a multiple of a predefined color and 'black'.
for name, rgb in available_colors.items():
if name == 'black':
continue
if rgb[0] != 0.0:
alpha = my_col[0] / rgb[0]
elif rgb[1] != 0.0:
alpha = my_col[1] / rgb[1]
else:
assert rgb[2] != 0.0
alpha = my_col[2] / rgb[2]
# The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are
# already accounted for by checking in available_colors above.
if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0:
xcol = name + ('!%r!black' % (alpha * 100))
return data, xcol, my_col
# Lookup failed, add it to the custom list.
xcol = 'color' + str(len(data['custom colors']))
data['custom colors'][xcol] = my_col[:3]
return data, xcol, my_col
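# Worked example of the decomposition above (the input colour is hypothetical):
# an RGB value of (0.375, 0.25, 0.125) is exactly 0.5 * brown (0.75, 0.5, 0.25),
# so the loop returns the xcolor mix 'brown!50.0!black' instead of defining a
# new custom color.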
| mit |
tdhopper/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 69 | 8605 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
smblance/ggplot | ggplot/tests/test_chart_components.py | 12 | 1664 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from nose.tools import assert_raises, assert_equal, assert_is_none
from ggplot import *
from ggplot.utils.exceptions import GgplotError
def test_chart_components():
"""
Test invalid arguments to chart components
"""
df = pd.DataFrame({'x': np.arange(10),
'y': np.arange(10)})
gg = ggplot(df, aes(x='x', y='y'))
# test ggtitle
assert_raises(GgplotError, ggtitle, None)
# test xlim
assert_raises(GgplotError, xlim, "foo", 1)
assert_raises(GgplotError, xlim, "foo", "bar")
# test ylim
assert_raises(GgplotError, ylim, "foo", 1)
assert_raises(GgplotError, ylim, "foo", "bar")
# test xlab
assert_raises(GgplotError, ylab, None)
# test ylab
assert_raises(GgplotError, ylab, None)
# test labs
test_xlab = 'xlab'
gg_xlab = gg + labs(x=test_xlab)
assert_equal(gg_xlab.xlab, test_xlab)
assert_is_none(gg_xlab.ylab)
assert_is_none(gg_xlab.title)
test_ylab = 'ylab'
gg_ylab = gg + labs(y=test_ylab)
assert_is_none(gg_ylab.xlab)
assert_equal(gg_ylab.ylab, test_ylab)
assert_is_none(gg_ylab.title)
test_title = 'title'
gg_title = gg + labs(title=test_title)
assert_is_none(gg_title.xlab)
assert_is_none(gg_title.ylab)
assert_equal(gg_title.title, test_title)
gg_labs = gg + labs(x=test_xlab, y=test_ylab, title=test_title)
assert_equal(gg_labs.xlab, test_xlab)
assert_equal(gg_labs.ylab, test_ylab)
assert_equal(gg_labs.title, test_title)
| bsd-2-clause |
noahbenson/neuropythy | neuropythy/graphics/__init__.py | 1 | 1109 | ####################################################################################################
# neuropythy/graphics/__init__.py
# Simple tools for making matplotlib/pyplot graphics with neuropythy.
# By Noah C. Benson
'''
The neuropythy.graphics package contains definitions of the various tools for making plots with
cortical data. The primary entry point is the function cortex_plot.
'''
from .core import (
cmap_curvature,
cmap_polar_angle_sym, cmap_polar_angle_lh, cmap_polar_angle_rh, cmap_polar_angle,
cmap_theta_sym, cmap_theta_lh, cmap_theta_rh, cmap_theta,
cmap_eccentricity, cmap_log_eccentricity, cmap_radius, cmap_log_radius,
cmap_cmag, cmap_log_cmag, label_cmap,
vertex_curvature_color, vertex_weight,
vertex_angle, vertex_eccen, vertex_sigma, vertex_varea,
vertex_angle_color, vertex_eccen_color, vertex_sigma_color, vertex_varea_color,
angle_colors, eccen_colors, sigma_colors, radius_colors, varea_colors, to_rgba,
color_overlap, visual_field_legend, curvature_colors, cortex_plot, cortex_plot_colors,
ROIDrawer, trace_roi, scale_for_cmap)
| agpl-3.0 |
google/autocjk | src/model.py | 1 | 14838 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN for generating CJK characters.
The vast majority of this code is adapted from the pix2pix GAN described in
https://www.tensorflow.org/tutorials/generative/pix2pix. Changes include the
specific tensor dimensions, some tuning of magic numbers, and some changes to
loss functions.
TODO(ambuc): This file has type annotations because they're useful for a human
reader, but the build system doesn't yet enforce them with a strictly-typed
python build rule.
"""
import time
from typing import List, Text, Tuple
from IPython import display
import matplotlib.pyplot as plt
import tensorflow as tf
_LAMBDA = 100
def _load_image(filename: Text) -> List[List[tf.Tensor]]:
"""Given the filename of a PNG, returns a list of three tensors: a, b, a+b.
Args:
filename: Path to a file. The file must be a PNG and greyscale and 256x256.
Returns:
A list of tensors: a, b, and a+b.
"""
image = tf.io.read_file(filename)
image = tf.image.decode_png(image, channels=1) # greyscale
# Our images have a width which is divisible by three.
w = tf.shape(image)[1] // 3
return [
tf.cast(image[:, n * w:(n + 1) * w, :], tf.float32) for n in range(3)
]
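# Hedged usage sketch for _load_image (the file name is hypothetical): given a
# 256x768 greyscale PNG laid out as [a | b | a+b], it yields three 256x256x1
# tensors:
#   lhs, rhs, composed = _load_image('data/example_triplet.png')
#   # lhs.shape == rhs.shape == composed.shape == (256, 256, 1)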
def make_datasets(files_glob: Text) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""Makes the train/test datasets.
Args:
files_glob: A glob (like "/tmp/folder/*.png") of all the input images.
Returns:
A pair of train, test datasets of type tf.data.Dataset.
"""
ds = tf.data.Dataset.list_files(files_glob).map(
_load_image, num_parallel_calls=tf.data.AUTOTUNE).shuffle(400).batch(1)
train_dataset_a = ds.shard(num_shards=3, index=0)
train_dataset_b = ds.shard(num_shards=3, index=1)
train_ds = train_dataset_a.concatenate(train_dataset_b)
test_ds = ds.shard(num_shards=3, index=2)
return train_ds, test_ds
def _downsample(filters: int,
size: int,
apply_batchnorm: bool = True) -> tf.keras.Sequential:
"""Downsampler from https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_generator.
Args:
filters: The number of filters.
size: The size of the input tensor width at this step.
apply_batchnorm: Whether or not to apply batch normalization. Probably
should be false on the input layer, and true elsewhere.
Returns:
A sequential model.
"""
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2D(filters,
size,
strides=2,
padding='same',
kernel_initializer=initializer,
use_bias=False))
if apply_batchnorm:
result.add(tf.keras.layers.BatchNormalization())
result.add(tf.keras.layers.LeakyReLU())
return result
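# Illustrative shape check for the downsampler above -- a sketch added for
# clarity, not part of the original module. A single _downsample block should
# halve the spatial resolution of a 256x256 single-channel batch.
def _check_downsample_shape() -> tf.TensorShape:
    block = _downsample(64, 4, apply_batchnorm=False)
    # Expected result: (1, 128, 128, 64).
    return block(tf.zeros([1, 256, 256, 1])).shape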
def _upsample(filters: int,
size: int,
apply_dropout: bool = False) -> tf.keras.Sequential:
"""Upsampler from https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_generator.
Args:
filters: The number of filters.
size: The size of the input tensor width at this step.
apply_dropout: Whether or not to apply dropout. Probably should be true for
the first few layers and false elsewhere.
Returns:
A sequential model.
"""
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2DTranspose(filters,
size,
strides=2,
padding='same',
kernel_initializer=initializer,
use_bias=False))
result.add(tf.keras.layers.BatchNormalization())
if apply_dropout:
result.add(tf.keras.layers.Dropout(0.5))
result.add(tf.keras.layers.ReLU())
return result
def make_generator() -> tf.keras.Model:
"""Creates a generator.
99% of this is copied directly from
https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_generator,
except for the input shape (now two channels, two greyscale images instead of
one RGB image) and output shape (one channel, one greyscale image instead of
one RGB image).
Returns:
a tf.keras.Model which returns a 256x256x1 tensor.
"""
inputs = tf.keras.layers.Input(shape=[256, 256, 2])
up_stack = [
_upsample(512, 4, apply_dropout=True), # (bs, 2, 2, 1024)
_upsample(512, 4, apply_dropout=True), # (bs, 4, 4, 1024)
_upsample(512, 4, apply_dropout=True), # (bs, 8, 8, 1024)
_upsample(512, 4), # (bs, 16, 16, 1024)
_upsample(256, 4), # (bs, 32, 32, 512)
_upsample(128, 4), # (bs, 64, 64, 256)
_upsample(64, 4), # (bs, 128, 128, 128)
]
x = inputs
skips = []
for down in [
_downsample(64, 4, apply_batchnorm=False), # (bs, 128, 128, 64)
_downsample(128, 4), # (bs, 64, 64, 128)
_downsample(256, 4), # (bs, 32, 32, 256)
_downsample(512, 4), # (bs, 16, 16, 512)
_downsample(512, 4), # (bs, 8, 8, 512)
_downsample(512, 4), # (bs, 4, 4, 512)
_downsample(512, 4), # (bs, 2, 2, 512)
_downsample(512, 4), # (bs, 1, 1, 512)
]:
x = down(x)
skips.append(x)
skips = reversed(skips[:-1])
# Upsampling and establishing the skip connections
for up, skip in zip(up_stack, skips):
x = up(x)
x = tf.keras.layers.Concatenate()([x, skip])
last = tf.keras.layers.Conv2DTranspose(
1, # one output channel, i.e. greyscale
4,
strides=2,
padding='same',
kernel_initializer=tf.random_normal_initializer(0., 0.02),
        activation='tanh') # (bs, 256, 256, 1)
x = last(x)
return tf.keras.Model(inputs=inputs, outputs=x)
def generator_loss(loss_object: tf.keras.losses.Loss, disc_generated_output,
gen_output, target):
gan_loss = loss_object(tf.ones_like(disc_generated_output),
disc_generated_output)
# mean absolute error
l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
total_gen_loss = gan_loss + (_LAMBDA * l1_loss)
return total_gen_loss, gan_loss, l1_loss
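# In symbols, the objective computed above is (a restatement, not new behavior):
#   total_gen_loss = BCE(1, D(x, G(x))) + _LAMBDA * mean(|target - G(x)|)
# with _LAMBDA = 100, so the L1 reconstruction term dominates the adversarial term.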
def make_discriminator() -> tf.keras.Model:
"""Returns a discriminator.
This is 99% the same as
https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_discriminator,
except that the shape of the input and output tensors are different.
Returns:
A tf.keras.model which accepts a 256x256x2 tensor and compares it to a
target 256x256x1 tensor.
"""
initializer = tf.random_normal_initializer(0., 0.02)
input_img = tf.keras.layers.Input(shape=[256, 256, 2], name='input_image')
target_img = tf.keras.layers.Input(shape=[256, 256, 1],
name='target_image')
x = tf.keras.layers.concatenate([input_img,
target_img]) # (bs, 256, 256, channels*2)
down1 = _downsample(64, 4, False)(x) # (bs, 128, 128, 64)
down2 = _downsample(128, 4)(down1) # (bs, 64, 64, 128)
down3 = _downsample(256, 4)(down2) # (bs, 32, 32, 256)
zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3) # (bs, 34, 34, 256)
conv = tf.keras.layers.Conv2D(512,
4,
strides=1,
kernel_initializer=initializer,
use_bias=False)(
zero_pad1) # (bs, 31, 31, 512)
batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
zero_pad2 = tf.keras.layers.ZeroPadding2D()(
leaky_relu) # (bs, 33, 33, 512)
last = tf.keras.layers.Conv2D(1,
4,
strides=1,
kernel_initializer=initializer)(
zero_pad2) # (bs, 30, 30, 1)
return tf.keras.Model(inputs=[input_img, target_img], outputs=last)
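# Rough shape walk-through of the discriminator above, for orientation only:
#   256 -> 128 -> 64 -> 32 (three _downsample blocks), zero-pad to 34,
#   4x4 valid conv -> 31, zero-pad to 33, final 4x4 valid conv -> 30,
# so each 256x256 input/target pair is scored as a 30x30x1 patch map.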
def discriminator_loss(loss_object: tf.keras.losses.Loss, disc_real_output,
disc_generated_output) -> float:
"""Returns discriminator loss.
100% the same as
https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_discriminator.
Args:
loss_object: A reusable loss_object of type
tf.keras.losses.BinaryCrossentropy.
disc_real_output: A set of real images.
disc_generated_output: A set of generator images.
Returns:
The sum of some loss functions.
"""
real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)
generated_loss = loss_object(tf.zeros_like(disc_generated_output),
disc_generated_output)
return real_loss + generated_loss
def generate_images(model: tf.keras.Model, input_a: tf.Tensor,
input_b: tf.Tensor, target: tf.Tensor) -> None:
"""In Colab, prints [a | b | real(a,b) | predicted(a,b)] to the display.
Args:
model: The generator to use.
input_a: the LHS image.
input_b: the RHS image.
target: The real(a,b) composition.
"""
x = tf.concat([input_a, input_b], 3)
x = tf.reshape(x, [256, 256, 2])
prediction = model(x[tf.newaxis, ...], training=True)
images = [input_a[0], input_b[0], target[0], prediction[0]]
fig, axes = plt.subplots(1, 4)
titles = [
'Input Image A', 'Input Image B', 'Ground Truth', 'Predicted Image'
]
for image, axis, title in zip(images, axes, titles):
axis.set_title(title)
axis.imshow(image[:, :, 0])
axis.axis('off')
fig.show()
@tf.function
def train_step(generator: tf.keras.Model,
generator_optimizer: tf.keras.optimizers.Optimizer,
discriminator: tf.keras.Model,
discriminator_optimizer: tf.keras.optimizers.Optimizer,
loss_object: tf.keras.losses.Loss, inp_a: tf.Tensor,
inp_b: tf.Tensor, target: tf.Tensor, epoch: int,
summary_writer: tf.summary.SummaryWriter) -> None:
"""Trains the models for one (1) epoch.
See https://www.tensorflow.org/tutorials/generative/pix2pix#training.
Args:
generator: A generator model,
generator_optimizer: and an optimizer for the generator.
discriminator: A discriminator model,
      discriminator_optimizer: and an optimizer for the discriminator.
loss_object: A reusable BinaryCrossentropy object.
inp_a: A full-width image of the left-most component.
inp_b: A full-width image of the right-most component.
target: The human-authored image of the a+b character.
epoch: The index of the epoch we're in.
summary_writer: A SummaryWriter object for writing.... summaries.
"""
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
inp_x = tf.concat([inp_a, inp_b], 3)
gen_output = generator(inp_x, training=True)
disc_real_output = discriminator([inp_x, target], training=True)
disc_generated_output = discriminator([inp_x, gen_output],
training=True)
gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(
loss_object, disc_generated_output, gen_output, target)
disc_loss = discriminator_loss(loss_object, disc_real_output,
disc_generated_output)
# TODO(ambuc): Should this simply be gen_l1_loss?
generator_gradients = gen_tape.gradient(gen_total_loss,
generator.trainable_variables)
discriminator_gradients = disc_tape.gradient(
disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(
zip(generator_gradients, generator.trainable_variables))
discriminator_optimizer.apply_gradients(
zip(discriminator_gradients, discriminator.trainable_variables))
with summary_writer.as_default():
tf.summary.scalar('gen_total_loss', gen_total_loss, step=epoch)
tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=epoch)
tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=epoch)
tf.summary.scalar('disc_loss', disc_loss, step=epoch)
def fit(generator: tf.keras.Model,
generator_optimizer: tf.keras.optimizers.Optimizer,
discriminator: tf.keras.Model,
discriminator_optimizer: tf.keras.optimizers.Optimizer,
loss_object: tf.keras.losses.Loss, train_ds: tf.data.Dataset,
epochs: int, test_ds: tf.data.Dataset, checkpoint: tf.train.Checkpoint,
checkpoint_prefix: Text,
summary_writer: tf.summary.SummaryWriter) -> None:
"""Runs for |epochs| and trains the models.
Args:
generator: A generator model,
generator_optimizer: and an optimizer for the generator.
discriminator: A discriminator model,
      discriminator_optimizer: and an optimizer for the discriminator.
      loss_object: A reusable BinaryCrossentropy object.
      train_ds: The training dataset of (a, b, a+b) image triples.
      epochs: The number of epochs to train for.
      test_ds: The test dataset, used to render sample predictions each epoch.
      checkpoint: A tf.train.Checkpoint for saving model/optimizer state,
      checkpoint_prefix: and the file prefix to save checkpoints under.
summary_writer: A SummaryWriter object for writing.... summaries.
"""
for epoch in range(epochs):
start = time.time()
display.clear_output(wait=True)
for a, b, ab in test_ds.take(1):
generate_images(generator, a, b, ab)
print('Epoch: ', epoch)
for n, (inp_a, inp_b, target) in train_ds.enumerate():
print('.', end='')
if (n + 1) % 100 == 0:
print()
train_step(generator, generator_optimizer, discriminator,
discriminator_optimizer, loss_object, inp_a, inp_b,
target, epoch, summary_writer)
print()
checkpoint.save(file_prefix=checkpoint_prefix)
print('Time taken for epoch {} is {} sec\n'.format(
epoch + 1,
time.time() - start))
checkpoint.save(file_prefix=checkpoint_prefix)
| apache-2.0 |
nikitasingh981/scikit-learn | examples/preprocessing/plot_scaling_importance.py | 45 | 5269 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Importance of Feature Scaling
=========================================================
Feature scaling through standardization (or Z-score normalization)
can be an important preprocessing step for many machine learning
algorithms. Standardization involves rescaling the features such
that they have the properties of a standard normal distribution
with a mean of zero and a standard deviation of one.
While many algorithms (such as SVM, K-nearest neighbors, and logistic
regression) require features to be normalized, intuitively we can
think of Principal Component Analysis (PCA) as being a prime example
of when normalization is important. In PCA we are interested in the
components that maximize the variance. If one component (e.g. human
height) varies less than another (e.g. weight) because of their
respective scales (meters vs. kilos), PCA might determine that the
direction of maximal variance more closely corresponds with the
'weight' axis, if those features are not scaled. As a change in
height of one meter can be considered much more important than the
change in weight of one kilogram, this is clearly incorrect.
To illustrate this, PCA is performed comparing the use of data with
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` applied,
to unscaled data. The results are visualized and a clear difference noted.
The 1st principal component in the unscaled set can be seen. It can be seen
that feature #13 dominates the direction, being a whole two orders of
magnitude above the other features. This is contrasted when observing
the principal component for the scaled version of the data. In the scaled
version, the orders of magnitude are roughly the same across all the features.
The dataset used is the Wine Dataset available at UCI. This dataset
has continuous features that are heterogeneous in scale due to differing
properties that they measure (i.e alcohol content, and malic acid).
The transformed data is then used to train a naive Bayes classifier, and a
clear difference in prediction accuracies is observed wherein the dataset
which is scaled before PCA vastly outperforms the unscaled version.
"""
from __future__ import print_function
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.pipeline import make_pipeline
print(__doc__)
# Code source: Tyler Lanigan <tylerlanigan@gmail.com>
# Sebastian Raschka <mail@sebastianraschka.com>
# License: BSD 3 clause
RANDOM_STATE = 42
FIG_SIZE = (10, 7)
features, target = load_wine(return_X_y=True)
# Make a train/test split using 30% test size
X_train, X_test, y_train, y_test = train_test_split(features, target,
test_size=0.30,
random_state=RANDOM_STATE)
# Fit to data and predict using pipelined GNB and PCA.
unscaled_clf = make_pipeline(PCA(n_components=2), GaussianNB())
unscaled_clf.fit(X_train, y_train)
pred_test = unscaled_clf.predict(X_test)
# Fit to data and predict using pipelined scaling, GNB and PCA.
std_clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
std_clf.fit(X_train, y_train)
pred_test_std = std_clf.predict(X_test)
# Show prediction accuracies in scaled and unscaled data.
print('\nPrediction accuracy for the normal test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test)))
print('\nPrediction accuracy for the standardized test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test_std)))
# Extract PCA from pipeline
pca = unscaled_clf.named_steps['pca']
pca_std = std_clf.named_steps['pca']
# Show first principal components
print('\nPC 1 without scaling:\n', pca.components_[0])
print('\nPC 1 with scaling:\n', pca_std.components_[0])
# Scale and use PCA on X_train data for visualization.
scaler = std_clf.named_steps['standardscaler']
X_train_std = pca_std.transform(scaler.transform(X_train))
# visualize standardized vs. untouched dataset with PCA performed
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax1.scatter(X_train[y_train == l, 0], X_train[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax2.scatter(X_train_std[y_train == l, 0], X_train_std[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
ax1.set_title('Training dataset after PCA')
ax2.set_title('Standardized training dataset after PCA')
for ax in (ax1, ax2):
ax.set_xlabel('1st principal component')
ax.set_ylabel('2nd principal component')
ax.legend(loc='upper right')
ax.grid()
plt.tight_layout()
plt.show()
| bsd-3-clause |