repo_name (string, 7–92 chars) | path (string, 5–149 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 911–693k chars) | license (string, 15 classes)
---|---|---|---|---|---|
soulmachine/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
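# A small follow-up sketch (editorial addition, not part of the original
# example): DBSCAN marks noise with the label -1, so the number of noise
# samples can be counted directly from the `labels` array computed above.
n_noise_ = list(labels).count(-1)
print('Estimated number of noise points: %d' % n_noise_)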
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
BillFoland/daisyluAMR | system/daisylu_system.py | 1 | 13721 |
import os
import sys
import pickle
import pandas as pd
import numpy as np
import hashlib
import os.path
from daisylu_config import *
from daisylu_vectors import *
from sentences import *
from daisylu_output import *
""
def addWikificationToDFrames(sents, sTypes, sentenceAttr):
# need to split this up into manageable file sizes, the wikifier dies with out of memory error currently
maxPartitionLen=200000
resultsDir = getSystemPath('daisyluPython') + '/wikificationData'
# if the working directory does not exist, create it.
# './wikificationData/input/test0.txt'
if not os.path.exists(resultsDir):
os.makedirs(resultsDir)
if not os.path.exists(resultsDir+'/input'):
os.makedirs(resultsDir+'/input')
if not os.path.exists(resultsDir+'/output'):
os.makedirs(resultsDir+'/output')
for sType in sTypes:
partitions = [ {'textString':'', 'charMapper':{}} ]
for sentIX in range(len(sents[sType])):
if len(partitions[-1]['textString']) > maxPartitionLen:
partitions.append( {'textString':'', 'charMapper':{}})
print 'THERE ARE NOW %d PARTITIONS' % len(partitions)
print '====================================================================================================================='
print '====================================================================================================================='
print
sentence = sents[sType][sentIX]
if not sentIX % 100: print 'addWikificationToDFrames', sType, sentIX
if not hasattr(sentence, sentenceAttr):
continue
sdf = getattr(sentence, sentenceAttr)
if sdf.empty:
continue
sdf['NERForm'] = ''
sdf['NERLabel'] = 'O'
sdf['WKCategory'] = ''
sdf['WKLink'] = ''
sdf['WKLinker'] = np.nan
sdf['WKRanker'] = np.nan
df = sdf[['wordIX','words','txFunc','txBIOES','nameCategory','wiki']].copy()
df['type'] = sType
df['sentIX'] = sentIX
df['allTStart'] = -1
df['allTEnd'] = -1
for i,t in enumerate(sentence.tokens):
startOffset = len(partitions[-1]['textString'])
partitions[-1]['textString'] += t
endOffset = len(partitions[-1]['textString'])
if (any(df.wordIX == i)):
df.loc[df.wordIX == i, 'allTStart'] = startOffset
df.loc[df.wordIX == i, 'allTEnd'] = endOffset
partitions[-1]['charMapper'][startOffset] = (sentIX, i, t)
partitions[-1]['textString'] += ' '
partitions[-1]['textString'] += '\n\n'
allText = ''
for x in partitions:
allText += x['textString']
m = hashlib.md5()
m.update(allText)
md5 = m.hexdigest()
print md5
cacheFn = 'wikificationData/' + md5 + '.pcl'
if not os.path.isfile(cacheFn): # calculate and archive the info, use it later if the same set of sentences is called for
wconfigs = []
wconfigs.append({
'config' : 'configs/STAND_ALONE_NO_INFERENCE.xml',
'inputFn' : resultsDir + '/input/test%d.txt',
'outputDn' : '%s/output/' % resultsDir,
})
info = { 'NER':{}, 'wiki':{} }
# partitions = pickle.load( open( 'wikiPartitions.pcl' ) )
"""
If you're using this system, please cite the paper.
Relational Inference for Wikification
Xiao Cheng and Dan Roth
EMNLP 2013
"""
for p, partition in enumerate(partitions):
for wtype in wconfigs:
tfile = open(wtype['inputFn'] % p, 'wb')
tfile.write(partition['textString'])
tfile.close()
direc = getSystemPath('Wikifier2013')
config = wtype['config']
inputFn = wtype['inputFn'] % p
outputDn = wtype['outputDn']
stencil = '/usr/bin/java -Xmx10G -jar dist/wikifier-3.0-jar-with-dependencies.jar -annotateData %s %s false %s'
cmd = stencil % (inputFn, outputDn, config)
print cmd
errorCode = os.system('cd %s; %s' % (direc, cmd) )
if errorCode:
raise ValueError('ERROR!\n non zero error code %d' % errorCode)
exit(1)
import xmltodict
from bs4 import BeautifulSoup
for p, partition in enumerate(partitions):
print 'Partition %d' % p
charMapper = partitions[p]['charMapper']
html = open('%s/output/' % resultsDir + '/test%d.txt.NER.tagged' % p).read()
parsed_html = BeautifulSoup(html, "lxml")
ner={ 'start':[], 'end':[], 'form':[], 'label':[]}
for item in parsed_html.find_all('start'):
ner['start'].append(int(item.text))
for item in parsed_html.find_all('end'):
ner['end'].append(int(item.text))
for item in parsed_html.find_all('form'):
ner['form'].append(item.text)
for item in parsed_html.find_all('label' ):
ner['label'].append(item.text)
for i in range(len(ner['start'])):
if not i % 100: print 'ner', i
tset = set()
for z in range(ner['start'][i],ner['end'][i]):
if z in charMapper:
tset.add(charMapper[z])
for trip in list(tset):
(six, wix, _) = trip
if not six in info['NER']:
info['NER'][six] = { 'NERForm':{}, 'NERLabel':{} }
info['NER'][six]['NERForm'][wix] = ner['form'][i]
info['NER'][six]['NERLabel'][wix] = ner['label'][i]
with open('%s/output/' % resultsDir + '/test%d.txt.wikification.tagged.full.xml' % p) as fd:
obj = xmltodict.parse(fd.read())
if obj['WikifierOutput']['WikifiedEntities']:
entities = obj['WikifierOutput']['WikifiedEntities']['Entity']
for entity in entities:
#entityText = entity['EntitySurfaceForm']
entityStartOffset = int(entity['EntityTextStart'])
entityEndOffset = int(entity['EntityTextEnd'])
linkerScore = float(entity['LinkerScore'])
rankerScore = float(entity['TopDisambiguation']['RankerScore'])
wikiTitle = entity['TopDisambiguation']['WikiTitle']
attributes = entity['TopDisambiguation']['Attributes']
#print entityText, entityStartOffset, entityEndOffset, textString[entityStartOffset:entityEndOffset]
tset = set()
for z in range(entityStartOffset,entityEndOffset+1):
if z in charMapper:
tset.add(charMapper[z])
for trip in list(tset):
(six, wix, _) = trip
if not six in info['wiki']:
info['wiki'][six] = { 'WKCategory':{}, 'WKLink':{}, 'WKLinker':{}, 'WKRanker':{} }
info['wiki'][six]['WKCategory'][wix] = attributes
info['wiki'][six]['WKLink'][wix] = wikiTitle
info['wiki'][six]['WKLinker'][wix] = linkerScore
info['wiki'][six]['WKRanker'][wix] = rankerScore
pickle.dump( info, open( cacheFn, "wb" ) )
else:
info = pickle.load( open( cacheFn, "rb" ) )
for six in info['NER']:
sentence = sents[sType][six]
if not hasattr(sentence, sentenceAttr):
continue
sdf = getattr(sentence, sentenceAttr)
for wix in info['NER'][six]['NERForm']:
sdf.loc[ (sdf.wordIX == wix), 'NERForm'] = info['NER'][six]['NERForm'][wix]
sdf.loc[ (sdf.wordIX == wix), 'NERLabel'] = info['NER'][six]['NERLabel'][wix]
for six in info['wiki']:
sentence = sents[sType][six]
if not hasattr(sentence, sentenceAttr):
continue
sdf = getattr(sentence, sentenceAttr)
for wix in info['wiki'][six]['WKCategory']:
sdf.loc[ (sdf.wordIX == wix), 'WKCategory'] = info['wiki'][six]['WKCategory'][wix]
sdf.loc[ (sdf.wordIX == wix), 'WKLink'] = info['wiki'][six]['WKLink'][wix]
sdf.loc[ (sdf.wordIX == wix), 'WKLinker'] = info['wiki'][six]['WKLinker'][wix]
sdf.loc[ (sdf.wordIX == wix), 'WKRanker'] = info['wiki'][six]['WKRanker'][wix]
def initializePredictionDataFrames(sents, ixList=None, NEWPrediction=False):
if not ixList:
ixList = range(len(sents['test']))
for sentIX in ixList:
sentence = sents['test'][sentIX]
tagList = getSentenceDFTagList()
if NEWPrediction:
tagList += ['NEWPrediction']
df = pd.DataFrame( columns=tagList )
if not (sentIX %1000):
print 'initializing pred frome ', sentIX
df['wordIX'] = range(len(sentence.tokens))
df['sentIX'] = sentIX
df['words'] = sentence.tokens
df['txBIOES'] = 'O'
sentence.predictedDFrame = df
addWikificationToDFrames(sents, ['test'], 'predictedDFrame')
print 'CLIPPING ALL SENTENCES TO LENGTH 100'
for sentIX in ixList:
sentence = sents['test'][sentIX]
sentence.predictedDFrame = sentence.predictedDFrame[sentence.predictedDFrame['wordIX'] < 100]
def createVectorsFromDataFrames(sents, sentenceAttr, dbf, dbtf, systemName, keepSense=True, L0OnlyFromFeaturesDB=False, useDistSG=False ):
wordDF = []
dbfn = getSystemPath('daisylu') + 'data/%s' % dbf
dbTestFn = getSystemPath('daisylu') + 'data/%s' % dbtf
merged = mergeSentenceDataFrames(None, ['test'], None, sents=sents, sentenceAttr=sentenceAttr)
if systemName== 'AMRL0NoNER':
createAMRL0Vectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None, useNER=False)
elif systemName== 'AMRL0':
wordDF = createAMRL0Vectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None )
elif systemName== 'AMRL0Args':
createAMRL0ArgVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
elif systemName== 'AMRL0Nargs':
createAMRL0NargVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
elif systemName== 'AMRL0Attr':
createAMRL0AttrVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
elif systemName== 'AMRL0Ncat':
createAMRL0NcatVectors(None, dbTestFn, 100.0, keepSense, sTypes=['test'], vectors=merged, featuresDB=dbfn, maxSents=None,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG )
else:
assert False, 'error, invalid system name'
return wordDF
def runKerasNetwork(networkType, vectorDBFn, modelFn, resultsFn, sType='test'):
direc = getSystemPath( 'NNModels' )
mm, ww = modelFn.split('@')
cmd = getSystemPath( 'python' )
cmd = cmd +' AMR_NN_Forward.py -v %s -m %s -w %s -r %s -s %s' % ('../data/' + vectorDBFn, mm, ww, '../results/' + resultsFn, sType)
print direc, cmd
errorCode = os.system('cd %s; %s' % (direc, cmd) )
if errorCode:
print vectorDBFn, modelFn, resultsFn, sType
raise ValueError('ERROR!\n non zero error code %d' % errorCode)
def runNetwork(networkType, vectorDBFn, modelFn, resultsFn, sType='test'):
if '@' in modelFn:
runKerasNetwork(networkType, vectorDBFn, modelFn, resultsFn, sType)
else:
assert False, 'error, Torch networks are no longer supported'
| mit |
mmottahedi/neuralnilm_prototype | scripts/e351.py | 2 | 6885 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
# subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-4,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': sigmoid
}
]
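# Editorial summary of the stack above (not in the original script): the input
# is dimshuffled to (batch, features, time), passed through two
# Conv1D(16 filters, length 4, rectify) + max-pool-over-time stages, shuffled
# back to (batch, time, features), then reduced through dense rectify layers of
# N, N//2 and N//4 units before a sigmoid output sized to
# output_shape[1] * output_shape[2].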
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
macks22/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
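# A minimal usage sketch for ``spectral_clustering`` (editorial illustration,
# not part of the original module). It assumes a symmetric affinity matrix
# built with the RBF kernel from ``sklearn.metrics.pairwise``:
#
#     import numpy as np
#     from sklearn.metrics.pairwise import rbf_kernel
#     X = np.vstack([np.random.randn(30, 2), np.random.randn(30, 2) + 4])
#     affinity = rbf_kernel(X, gamma=1.0)
#     labels = spectral_clustering(affinity, n_clusters=2, random_state=0)
#     assert len(labels) == X.shape[0]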
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values mean
very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
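# A minimal usage sketch for the estimator API (editorial illustration, not
# part of the original module); ``make_moons`` is assumed to be available in
# this scikit-learn version:
#
#     from sklearn.datasets import make_moons
#     X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
#     model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
#                                n_neighbors=10, random_state=0)
#     cluster_labels = model.fit(X).labels_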
| bsd-3-clause |
molpopgen/fwdpy11 | examples/discrete_demography/localadaptation.py | 1 | 7832 | #
# Copyright (C) 2019 Kevin Thornton <krthornt@uci.edu>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
"""
Local adaptation of a quantitative trait to differing optima.
"""
import argparse
import math
import sys
from collections import namedtuple
import numpy as np
import pandas as pd
import fwdpy11
# Simulations with tree sequence recording need
# to know the max position in a genome. Here,
# we use a length of 1.0. Thus, all mutation
# and recombination events will be uniform
# random variables on the continuous interval
# [0, GENOME_LENGTH).
GENOME_LENGTH = 1.0
# When recording quant-genetic statistics during a simulation,
# we will use this type. Named tuples are extremely efficient,
# and they are easily converted into Pandas DataFrame objects,
# which is very convenient for analysis and output.
Datum = namedtuple("Data", ["generation", "deme", "gbar", "vg", "wbar"])
def make_parser():
"""
Create a command-line interface to the script.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
required = parser.add_argument_group("Required arguments")
required.add_argument("--popsize", "-N", type=int, help="Diploid population size")
required.add_argument(
"--mu", "-m", type=float, help="Mutation rate (per gamete, per generation)"
)
required.add_argument(
"--sigma",
"-s",
type=float,
help="Standard deviation of Gaussian" "distribution of mutational effects",
)
optional = parser.add_argument_group("Optional arguments")
optional.add_argument(
"--rho", type=float, default=1000.0, help="Scaled recombination rate, rho=4Nr"
)
optional.add_argument(
"--VS",
type=float,
default=10.0,
help="Inverse strength of stabilizing selection",
)
optional.add_argument(
"--opt", type=float, default=1.0, help="Value of new phenotypic optimum"
)
optional.add_argument(
"--migrates",
type=float,
nargs=2,
default=None,
help="Migration rates from 0 to 1 and 1 to 0, respectively.",
)
optional.add_argument(
"--time",
type=float,
default=0.1,
help="Amount of time to simulate past" "optimum shift, in units of N",
)
optional.add_argument(
"--plotfile", type=str, default=None, help="File name for plot"
)
optional.add_argument("--seed", type=int, default=42, help="Random number seed.")
return parser
def validate_arguments(args):
"""
Validate input arguments.
Note: this is likely incomplete.
"""
if args.popsize is None:
raise ValueError("popsize cannot be None")
if args.mu is None:
raise ValueError("mu cannot be None")
if args.mu < 0 or math.isfinite(args.mu) is False:
raise ValueError("Mutation rate must be non-negative and finite")
if args.sigma is None:
raise ValueError("sigma cannot be None")
if args.sigma < 0 or math.isfinite(args.sigma) is False:
raise ValueError(
"Std. dev. of distribution of effect sizes "
"must be non-negative and finite"
)
if args.migrates is not None:
for m in args.migrates:
if m < 0 or m > 1:
raise ValueError("migration rates must be 0 <= m <= 1")
def make_migmatrix(migrates):
if migrates is None:
return None
mm = np.zeros(4).reshape(2, 2)
mm[0, 1] = migrates[1]
mm[1, 0] = migrates[0]
rs = np.sum(mm, axis=1)
np.fill_diagonal(mm, 1.0 - rs)
return fwdpy11.MigrationMatrix(mm)
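# Worked example (editorial note, not in the original script): with
# ``--migrates 0.01 0.02`` the matrix built above is
#
#     [[0.98, 0.02],
#      [0.01, 0.99]]
#
# i.e. the two off-diagonal entries come from the supplied rates and the
# diagonal is filled with 1 minus the row sum, so every row sums to 1.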
class Recorder(object):
"""
fwdpy11 allows you to define objects that record data
from populations during simulation. Such objects must
be callable, and the easiest way to do things is to
create a class with a __call__ function.
"""
def __init__(self, start):
self.data = []
self.start = start
def __call__(self, pop, recorder):
if pop.generation >= self.start:
# Record mean trait value each generation.
md = np.array(pop.diploid_metadata, copy=False)
demes = np.unique(md["deme"])
for d in demes:
w = np.where(md["deme"] == d)[0]
gbar = md["g"][w].mean()
vg = md["g"][w].var()
wbar = md["w"][w].mean()
self.data.append(Datum(pop.generation, d, gbar, vg, wbar))
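# A minimal wiring sketch (editorial note; the actual call appears in
# ``runsim`` below): an instance of this class is passed as the recorder
# argument of ``fwdpy11.evolvets``, e.g.
#
#     r = Recorder(10 * args.popsize)
#     fwdpy11.evolvets(rng, pop, params, 100, r, suppress_table_indexing=True)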
def plot_output(data, filename):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9, 3))
gs = gridspec.GridSpec(ncols=3, nrows=1, figure=fig)
ax_gbar = fig.add_subplot(gs[0, 0])
ax_vg = fig.add_subplot(gs[0, 1])
ax_wbar = fig.add_subplot(gs[0, 2])
df = pd.DataFrame(data, columns=Datum._fields)
g = df.groupby(["deme"])
for n, gi in g:
ax_gbar.plot(gi["generation"], gi["gbar"], label="Deme {}".format(n))
ax_vg.plot(gi["generation"], gi["vg"], label="Deme {}".format(n))
ax_wbar.plot(gi["generation"], gi["wbar"], label="Deme {}".format(n))
for ax in [ax_gbar, ax_vg, ax_wbar]:
ax.set_xlabel("Generation")
ax_gbar.set_ylabel(r"$\bar{g}$")
ax_vg.set_ylabel(r"$V(G)$")
ax_wbar.set_ylabel(r"$\bar{w}$")
ax_gbar.legend()
plt.tight_layout()
plt.savefig(filename)
def runsim(args):
"""
Run the simulation.
"""
pop = fwdpy11.DiploidPopulation(2 * args.popsize, GENOME_LENGTH)
np.random.seed(args.seed)
rng = fwdpy11.GSLrng(args.seed)
GSSmo0 = fwdpy11.GSSmo(
[
fwdpy11.Optimum(when=0, optimum=0.0, VS=args.VS),
fwdpy11.Optimum(when=10 * args.popsize, optimum=args.opt, VS=args.VS),
]
)
GSSmo1 = fwdpy11.GSSmo(
[
fwdpy11.Optimum(when=0, optimum=0.0, VS=args.VS),
fwdpy11.Optimum(
when=10 * args.popsize, optimum=-1.0 * args.opt, VS=args.VS
),
]
)
mm = make_migmatrix(args.migrates)
dd = fwdpy11.DiscreteDemography(
mass_migrations=[fwdpy11.move_individuals(0, 0, 1, 0.5)], migmatrix=mm
)
p = {
"nregions": [], # No neutral mutations -- add them later!
"gvalue": [fwdpy11.Additive(2.0, GSSmo0), fwdpy11.Additive(2.0, GSSmo1)],
"sregions": [fwdpy11.GaussianS(0, GENOME_LENGTH, 1, args.sigma)],
"recregions": [fwdpy11.Region(0, GENOME_LENGTH, 1)],
"rates": (0.0, args.mu, args.rho / float(4 * args.popsize)),
# Keep mutations at frequency 1 in the pop if they affect fitness.
"prune_selected": False,
"demography": dd,
"simlen": 10 * args.popsize + int(args.popsize * args.time),
}
params = fwdpy11.ModelParams(**p)
r = Recorder(10 * args.popsize)
fwdpy11.evolvets(rng, pop, params, 100, r, suppress_table_indexing=True)
if args.plotfile is not None:
plot_output(r.data, args.plotfile)
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args(sys.argv[1:])
validate_arguments(args)
runsim(args)
| gpl-3.0 |
OPU-Surveillance-System/monitoring | master/scripts/planner/solvers/test_penalization_plot.py | 1 | 1040 | import matplotlib.pyplot as plt
with open("test_pen", "r") as f:
data = f.read()
data = data.split("\n")[:-1]
data = [data[i].split(" ") for i in range(0, len(data))]
pen = [float(data[i][0]) for i in range(len(data))]
u = [float(data[i][1]) for i in range(len(data))]
d = [float(data[i][2]) for i in range(len(data))]
gain = [((d[i-1] - d[i])) / (u[i] - u[i - 1]) for i in range(1, len(data))]
gain = [gain[0]] + gain
print(u, d, gain)
fig, ax1 = plt.subplots()
pu, = ax1.plot(pen, u, color="r", label="Uncertainty rate")
ax1.scatter(pen, u, color="k")
#ax1.axhline(9000, color="r", linestyle="--")
#ax1.set_title("Cost evolution according to the number of iterations")
ax1.set_xlabel("Penalization coefficient")
ax1.set_ylabel("Uncertainty rate")
ax2 = ax1.twinx()
pd, = ax2.plot(pen, d, color="b", linestyle="--", label="Distance")
ax2.scatter(pen, d, color="k")
ax2.set_ylabel("Distance")
#ax2.axhline(0.99, color="b", linestyle="--")
#plt.axvline(4000000, color="k",linestyle = ":")
plt.legend(handles=[pu, pd], loc=7)
plt.show()
| mit |
NDManh/numbbo | code-postprocessing/bbob_pproc/pptex.py | 4 | 14442 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Routines for writing TeX for tables."""
from __future__ import absolute_import
import os
import sys
import string
import numpy
from . import toolsstats
from pdb import set_trace
#GLOBAL VARIABLES DEFINITION
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
#conversion of matplotlib elements to LaTeX
latex_marker_map = {'o': r'$\circ$',
'd': r'$\diamondsuit$',
's': r'$\Box$',
'v': r'$\triangledown$',
'*': r'$\star$',
'h': r'$\varhexagon$', # need \usepackage{wasysymb}
'^': r'$\triangle$',
'p': r'$\pentagon$', # need \usepackage{wasysymb}
'H': r'$\hexagon$', # need \usepackage{wasysymb}
'<': r'$\triangleleft$',
'D': r'$\Diamond$',
'>': r'$\triangleright$',
'1': r'$\downY$', # need \usepackage{MnSymbol}
'2': r'$\upY$', # need \usepackage{MnSymbol}
'3': r'$\rightY$', # need \usepackage{MnSymbol}
'4': r'$\leftY$'} # need \usepackage{MnSymbol}
html_marker_map = {
'o': r'○',
'd': r'♢',
's': r'◻',
'v': r'▽',
'*': r'☆',
'h': r'varhexagon',
'^': r'△',
'p': r'pentagon',
'H': r'hexagon',
'<': r'◁',
'D': r'◇',
'>': r'▷',
'1': r'downY',
'2': r'upY',
'3': r'rightY',
'4': r'leftY'}
latex_color_map_old = {
'g': 'green!45!black',
'r': 'red',
'c': 'cyan',
'm': 'magenta',
'y': 'yellow',
'k': 'black',
'b': 'blue'}
latex_color_map = {
'c': 'cyan',
'm': 'magenta',
'y': 'yellow',
'b': 'blue',
'g': 'green',
'#000080': 'NavyBlue',
'r': 'red',
'#ffd700': 'Goldenrod',
'#d02090': 'VioletRed',
'k': 'Black',
'#6495ed': 'CornflowerBlue',
'#ff4500': 'OrangeRed',
'#ffff00': 'Yellow',
'#ff00ff': 'Magenta',
'#bebebe': 'Gray',
'#87ceeb': 'SkyBlue',
'#ffa500': 'Orange',
'#ffc0cb': 'Lavender',
'#4169e1': 'RoyalBlue',
'#228b22': 'ForestGreen',
'#32cd32': 'LimeGreen',
'#9acd32': 'YellowGreen',
'#adff2f': 'GreenYellow'}
#CLASS DEFINITION
class Error(Exception):
""" Base class for errors. """
pass
class WrongInputSizeError(Error):
"""Error if an array has the wrong size for the following operation.
:returns: message containing the size of the array and the required
size.
"""
def __init__(self,arrName, arrSize, reqSize):
self.arrName = arrName
self.arrSize = arrSize
self.reqSize = reqSize
def __str__(self):
message = 'The size of %s is %s. One dimension must be of length %s!' %\
(self.arrName,str(self.arrSize), str(self.reqSize))
return repr(message)
#TOP LEVEL METHODS
def color_to_latex(color):
try:
res = '\color{%s}' % latex_color_map[color]
except KeyError, err:
try:
float(color)
res = '\color[gray]{%s}' % color
except ValueError:
raise err
return res
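# Quick illustration (editorial addition, not in the original module):
# color_to_latex('r') returns '\color{red}', while a gray-level string such
# as '0.5' falls back to '\color[gray]{0.5}'.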
def marker_to_latex(marker):
return latex_marker_map[marker]
def marker_to_html(marker):
return html_marker_map[marker]
def numtotext(n):
"""Returns a text from a positive integer.
Is to be used for generating command names: they cannot include number
characters.
WARNING: n should not be larger than (53*52)-1 = 2755 for the moment
"""
if n < 52:
str = alphabet[n]
elif n < 53*52:
str = alphabet[(n-52)//52] + alphabet[n-n//52*52]
else:
raise Exception('Cannot handle a number of algorithms that large.')
return str
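# A quick sanity check (editorial addition, not in the original module),
# using the 52-character ``alphabet`` defined above:
#
#     >>> numtotext(0)
#     'A'
#     >>> numtotext(51)
#     'z'
#     >>> numtotext(53)
#     'AB'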
def writeLabels(label):
"""Format text to be output by LaTeX."""
return label.replace('_', r'\_')
def writeFEvals(fevals, precision='.2'):
"""Returns string representation of a number of function evaluations."""
if numpy.isinf(fevals):
return r'$\infty$'
tmp = (('%' + precision + 'g') % fevals)
res = tmp.split('e')
if len(res) > 1:
res[1] = '%d' % int(res[1])
res = '%s' % 'e'.join(res)
pr2 = str(float(precision) + .2)
#res2 = (('%' + pr2 + 'g') % fevals)
res2 = (('%' + pr2 + 'g') % float(tmp))
# To have the same number of significant digits.
if len(res) >= len(res2):
res = res2
else:
res = res[0]
return res
def writeFEvals2(fevals, precision=2, maxdigits=None, isscientific=False):
"""Returns string representation of a number of function evaluations.
This method is supposed to be used for filling up a LaTeX tabular.
To address the eventual need to keep their string representation
short, the method here proposes the shortest representation between
the full representation and a modified scientific representation.
:param float fevals:
:param int precision: number of significant digits
:param int maxdigits:
:param bool isscientific:
Examples:
====== ========= =====================
Number Precision Output Representation
====== ========= =====================
102345 2 digits 1.0e5
====== ========= =====================
"""
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
if maxdigits is None:
precision = int(precision)
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
res = (('%.' + str(precision-1) + 'e') % fevals)
repr1 = res
tmp = repr1.split('e')
tmp[1] = '%d' % int(tmp[1]) # Drop the eventual plus sign and trailing zero
repr1 = 'e'.join(tmp)
repr2 = (('%.' + str(precision+1) + 'f') % float(res)).rstrip('0').rstrip('.')
#set_trace()
if len(repr1) > len(repr2) and not isscientific:
return repr2
return repr1
else:
# takes precedence, in this case we expect a positive integer
if not isinstance(fevals, int):
return '%d' % fevals
repr2 = '%.0f' % fevals
if len(repr2) > maxdigits:
precision = maxdigits - 4
# 1) one symbol for the most significant digit
# 2) one for the dot, 3) one for the e, 4) one for the exponent
if numpy.log10(fevals) > 10:
precision -= 1
if precision < 0:
precision = 0
repr1 = (('%.' + str(precision) + 'e') % fevals).split('e')
repr1[1] = '%d' % int(repr1[1]) # drop the sign and trailing zero
repr1 = 'e'.join(repr1)
return repr1
return repr2
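# A short check against the docstring's example (editorial addition): the
# value 102345 with 2 significant digits comes out in the compact scientific
# form, while small values keep the plain representation:
#
#     >>> writeFEvals2(102345, precision=2)
#     '1.0e5'
#     >>> writeFEvals2(89, precision=2)
#     '89'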
def writeFEvalsMaxSymbols(fevals, maxsymbols, isscientific=False):
"""Return the smallest string representation of a number.
This method is only concerned with the maximum number of significant
digits.
Two alternatives:
1) modified scientific notation (without the trailing + and zero in
the exponent)
2) float notation
:returns: string representation of a number of function evaluations
or ERT.
"""
#Compared to writeFEvals2?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
repr1 = (('%.' + str(maxsymbols) + 'e') % fevals)
size1 = len(repr1)
tmp = repr1.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
remainingsymbols = max(maxsymbols - len(tmp2) - 2, 0)
tmp[0] = (('%.' + str(remainingsymbols) + 'f') % float(tmp[0]))
repr1 = 'e'.join(tmp)
#len(repr1) <= maxsymbols is not always the case but should be most usual
tmp = '%.0f' % fevals
remainingsymbols = max(maxsymbols - len(tmp), 0)
repr2 = (('%.' + str(remainingsymbols) + 'f') % fevals)
tmp = repr2.split('.', 1)
if len(tmp) > 1:
tmp[-1] = tmp[-1].rstrip('0')
repr2 = '.'.join(tmp)
repr2 = repr2.rstrip('.')
#set_trace()
if len(repr1)-repr1.count('.') < len(repr2)-repr2.count('.') or isscientific:
return repr1
#tmp1 = '%4.0f' % bestalgdata[-1]
#tmp2 = ('%2.2g' % bestalgdata[-1]).split('e', 1)
#if len(tmp2) > 1:
# tmp2[-1] = tmp2[-1].lstrip('+0')
# tmp2 = 'e'.join(tmp2)
# tmp = tmp1
# if len(tmp1) >= len(tmp2):
# tmp = tmp2
# curline.append(r'\multicolumn{2}{c|}{%s}' % tmp)
return repr2
def writeFEvalsMaxPrec(entry, SIG, maxfloatrepr=1e5):
"""Return a string representation of a number.
Two alternatives:
1) float notation with a precision smaller or equal to SIG (if the
entry is one, then the result is 1).
2) if the number is larger or equal to maxfloatrepr, a modified
scientific notation (without the trailing + and zero in the
exponent)
:returns: string representation of a number of function evaluations
or ERT.
"""
#CAVE: what if entry is smaller than 10**(-SIG)?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(entry)
if numpy.isinf(entry):
return r'$\infty$'
if entry == 1.:
res = '1'
elif entry < maxfloatrepr:
# the full notation but with given maximum precision
corr = 1 if abs(entry) < 1 else 0
tmp = '%.0f' % entry
remainingsymbols = max(SIG - len(tmp) + corr, 0)
res = (('%.' + str(remainingsymbols) + 'f') % entry)
else:
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
res = (('%.' + str(max([0, SIG - 1])) + 'e') % entry)
size1 = len(res)
tmp = res.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
if len(tmp) > 1 and tmp[-1]:
res = 'e'.join(tmp)
else:
res = tmp[0]
return res
def tableLaTeX(table, spec, extraeol=()):
"""Generates a tabular from a sequence of sequence of strings.
:param seq table: sequence of sequence of strings
:param string spec: string for table specification, see
http://en.wikibooks.org/wiki/LaTeX/Tables#The_tabular_environment
:param seq extraeol: sequence of string the same length as the table
(same number of lines) which are added at the
end of each line.
:returns: sequence of strings of a LaTeX tabular.
"""
if not extraeol:
extraeol = len(table) * ['']
# TODO: check that spec and extraeol have the right format?
res = [r'\begin{tabular}{%s}' % spec]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\' + extraeol[i]
# curline = ' & '.join(line) + r'\\\hline' + extraeol[i]
res.append(curline)
res.append(' & '.join(table[-1]) + extraeol[-1])
res.append(r'\end{tabular}')
res = '\n'.join(res)
return res
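# A minimal usage sketch (editorial addition, not in the original module):
#
#     >>> rows = [['f1', '10', '100'], ['f2', '12', '150']]
#     >>> print(tableLaTeX(rows, spec='l|rr'))
#     \begin{tabular}{l|rr}
#     f1 & 10 & 100\\
#     f2 & 12 & 150
#     \end{tabular}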
def tableXLaTeX(table, spec, extraeol=()):
"""Generates a tabular from a sequence of sequence of strings.
:param seq table: sequence of sequence of strings
:param string spec: string for table specification, see
http://en.wikibooks.org/wiki/LaTeX/Tables#The_tabular_environment
:param seq extraeol: sequence of string the same length as the table
(same number of lines) which are added at the
end of each line.
:returns: sequence of strings of a LaTeX tabular.
"""
if not extraeol:
extraeol = len(table) * ['']
# TODO: check that spec and extraeol have the right format?
if 1 < 3:
res = [r'\begin{tabularx}{1.0\textwidth}{%s}' % spec]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\' + extraeol[i]
res.append(curline)
else: # format with hline, when is it needed, for non-paper tables?
res = [r'\begin{tabularx}{1.3\textwidth}{%s}' % spec]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\\hline' + extraeol[i]
res.append(curline)
res.append(' & '.join(table[-1]) + extraeol[-1])
res.append(r'\end{tabularx}')
res = '\n'.join(res)
return res
def tableLaTeXStar(table, width, spec, extraeol=()):
"""Generates a tabular\* from a sequence of sequence of strings
:param seq table: sequence of sequence of strings
:param string width: string for the width of the table
:param strin spec: string for table specification, see
http://en.wikibooks.org/wiki/LaTeX/Tables#The_tabular_environment
:param seq extraeol: sequence of string the same length as the table
(same number of lines) which are added at the
end of each line.
"""
if not extraeol:
extraeol = len(table) * ['']
# TODO: check that spec and extraeol have the right format?
res = [r'\begin{tabular*}{%s}{%s}' % (width, spec)]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\' + extraeol[i]
res.append(curline)
res.append(' & '.join(table[-1]) + extraeol[-1])
res.append(r'\end{tabular*}')
res = '\n'.join(res)
return res
class DataTable(list):
pass
| bsd-3-clause |
huobaowangxi/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been
fitted already and all data is used for calibration. Note that
data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
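# A minimal usage sketch (editorial addition, not part of the original
# module); it assumes a LinearSVC base estimator, as in the default path:
#
#     from sklearn.datasets import make_classification
#     from sklearn.svm import LinearSVC
#     X, y = make_classification(n_samples=300, random_state=0)
#     clf = CalibratedClassifierCV(LinearSVC(random_state=0),
#                                  method='sigmoid', cv=3)
#     proba = clf.fit(X, y).predict_proba(X)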
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that we each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
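# Illustrative usage sketch (not part of the library API): calibrating the
# decision scores of a LinearSVC with 3-fold cross-validation. The dataset
# shape and parameter values below are assumptions chosen for demonstration.
def _calibrated_classifier_cv_example():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, random_state=0)
    # The base estimator is fit on each training fold and calibrated on the
    # held-out fold; predictions average the per-fold calibrated outputs.
    clf = CalibratedClassifierCV(LinearSVC(random_state=0),
                                 method='sigmoid', cv=3)
    clf.fit(X, y)
    return clf.predict_proba(X[:5])  # shape (5, 2), rows sum to 1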
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
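# Minimal sketch (the synthetic scores below are assumptions for illustration):
# the slope/intercept pair (a, b) returned above maps a raw score f to a
# calibrated probability via 1. / (1. + exp(a * f + b)), which is how
# _SigmoidCalibration.predict applies it.
def _sigmoid_calibration_example():
    rng = np.random.RandomState(0)
    scores = np.concatenate([rng.normal(-1., 1., 50), rng.normal(1., 1., 50)])
    targets = np.concatenate([np.zeros(50), np.ones(50)])
    a, b = _sigmoid_calibration(scores, targets)
    return 1. / (1. + np.exp(a * scores + b))  # calibrated probabilities in (0, 1)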
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
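# Minimal sketch with toy inputs (values are assumptions for illustration):
# with n_bins=2 the first bin collects the low-probability predictions and the
# second the high-probability ones; prob_true is the empirical positive rate
# per bin and prob_pred the mean predicted probability per bin.
def _calibration_curve_example():
    y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1])
    y_prob = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9])
    prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
    return prob_true, prob_pred  # (array([0.25, 1.]), array([0.25, 0.7625]))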
| bsd-3-clause |
pbrod/scipy | scipy/special/basic.py | 3 | 70421 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, less, inexact, nan, zeros, sinc)
from . import _ufuncs as ufuncs
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, ndtri,
poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta']
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
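# Quick illustrative check (sketch, not part of the module): the values
# returned by jn_zeros are roots of Jn, so jv evaluated at them is zero to
# numerical precision.
def _jn_zeros_example():
    z = jn_zeros(0, 3)   # first three positive zeros of J0
    return z, jv(0, z)   # jv(0, z) is ~0 (on the order of 1e-16)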
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
        p = phase * (p * (n-i+1)) / i    # = phase**i * binom(n, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
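# Minimal sketch (order, argument and step size below are arbitrary choices):
# the analytic derivative from DLMF 10.6.7 agrees with a central finite
# difference of jv to within finite-difference error.
def _jvp_example(v=1.5, z=2.0, h=1e-6):
    analytic = jvp(v, z)
    numeric = (jv(v, z + h) - jv(v, z - h)) / (2.0 * h)
    return analytic, numeric   # the two values agree closely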
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
@np.deprecate(message="scipy.special.sph_jn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn instead. "
"Note that the new function has a different signature.")
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
See also
--------
spherical_jn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
@np.deprecate(message="scipy.special.sph_yn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_jnyn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn and "
"scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_jn
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_in is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in instead. "
"Note that the new function has a different signature.")
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
See also
--------
spherical_in
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
@np.deprecate(message="scipy.special.sph_kn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
@np.deprecate(message="scipy.special.sph_inkn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in and "
"scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_in
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
The Ricatti-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
    Riccati-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
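# Minimal sketch (assumes scipy.special.spherical_jn, available in this SciPy
# version): the order-n entry of the returned sequence equals x * jn(x), the
# defining relation of the Riccati-Bessel function of the first kind.
def _riccati_jn_example(n=2, x=1.5):
    from scipy.special import spherical_jn
    rj, rjp = riccati_jn(n, x)
    return rj[n], x * spherical_jn(n, x)   # the two values agree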
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
The Ricatti-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
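# Illustrative round-trip check (sketch only; erf is taken from the parent
# scipy.special namespace): erfinv inverts erf on (-1, 1).
def _erfinv_example():
    from scipy.special import erf
    y = np.array([-0.9, -0.5, 0.0, 0.5, 0.9])
    return erf(erfinv(y))   # approximately equal to y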
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**k`` with ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
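# Quick illustrative check: for k=0 and degree n=1 the generalized Laguerre
# polynomial is 1 - x, so assoc_laguerre(x, 1) reproduces that value.
def _assoc_laguerre_example(x=0.3):
    return assoc_laguerre(x, 1), 1.0 - x   # both equal 0.7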
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Sequence of associated Legendre functions of the first kind.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
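# Minimal sketch (the argument value is an arbitrary choice): the returned
# table p[m, n] contains the familiar low-order values, e.g. P_1^0(x) = x and,
# with the Condon-Shortley phase used here, P_1^1(x) = -sqrt(1 - x**2).
def _lpmn_example(x=0.5):
    p, pd = lpmn(1, 1, x)
    return p[0, 1], p[1, 1]   # approximately (0.5, -0.8660)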
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind for complex arguments.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Sequence of associated Legendre functions of the second kind.
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
    # Ensure neither m nor n is 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre function of the first kind.
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre function of the second kind.
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
    if (v < 0):
        raise ValueError("v must be non-negative.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
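# Minimal sketch (order and argument are arbitrary choices): the last entry of
# the returned sequence matches the defining relation
# Lambda_v(x) = Gamma(v + 1) * J_v(x) / (x / 2)**v from the docstring above.
def _lmbda_example(v=2.5, x=3.0):
    vl, dl = lmbda(v, x)
    direct = gamma(v + 1) * jv(v, x) / (x / 2.0)**v
    return vl[-1], direct   # the two values agree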
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
r"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
return ellipkm1(1 - asarray(m))
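# Illustrative sketch, not part of the scipy API: the note above warns that
# several parameter conventions exist.  If a reference quotes the modulus k
# (or the modular angle alpha), convert to the parameter m = k**2 =
# sin(alpha)**2 before calling `ellipk`.  The helper name below is hypothetical.
def _ellipk_from_modulus(k):
    """Evaluate K given the modulus k rather than the parameter m = k**2."""
    k = asarray(k)
    return ellipk(k * k)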
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a, b)
agm(a, b)=agm(b, a)
agm(a, a) = a
min(a, b) < agm(a, b) < max(a, b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
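# Minimal sketch, not part of the scipy API: iterate the recurrence described
# in the agm() docstring directly, which is handy for sanity-checking the
# ellipkm1-based closed form above.  Assumes a, b > 0; the name is hypothetical.
def _agm_by_iteration(a, b, tol=1e-15):
    """Iterate a_{n+1} = (a_n + b_n)/2, b_{n+1} = sqrt(a_n*b_n) to convergence."""
    a, b = float(a), float(b)
    while abs(a - b) > tol * max(abs(a), abs(b)):
        a, b = 0.5 * (a + b), math.sqrt(a * b)
    return 0.5 * (a + b)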
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, float, ndarray
The total number of combinations.
See Also
--------
binom : Binomial coefficient ufunc
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# http://stackoverflow.com/a/16327037/125507
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
.. math:: n! = \\Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
def zeta(x, q=None, out=None):
r"""
Riemann or Hurwitz zeta function.
Parameters
----------
x : array_like of float
Input data, must be real
q : array_like of float, optional
Input data, must be real. Defaults to Riemann zeta.
out : ndarray, optional
Output array for the computed values.
Notes
-----
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
    The Riemann zeta function corresponds to the case ``q = 1``.
See also
--------
zetac
"""
if q is None:
q = 1
return _zeta(x, q, out)
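# Illustrative sketch, not part of the scipy API: a brute-force partial sum of
# the Hurwitz series from the zeta() docstring, sum_{k>=0} (k + q)**(-x).  It
# converges slowly and is only meant to make the definition concrete; use the
# `zeta` function above for real work.  The helper name is hypothetical.
def _hurwitz_zeta_partial_sum(x, q=1.0, terms=100000):
    """Truncated Hurwitz zeta series; accurate only for x comfortably > 1."""
    k = np.arange(terms, dtype=float)
    return np.sum((k + q) ** (-x))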
| bsd-3-clause |
joernhees/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 26 | 6935 | import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.utils.fixes import norm
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
@skip_if_32bit
def test_feature_importances_2d_coef():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0, n_classes=4)
est = LogisticRegression()
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
for order in [1, 2, np.inf]:
# Fit SelectFromModel a multi-class problem
transformer = SelectFromModel(estimator=LogisticRegression(),
threshold=threshold,
norm_order=order)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'coef_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
# Manually check that the norm is correctly performed
est.fit(X, y)
importances = norm(est.coef_, axis=0, ord=order)
feature_mask = importances > func(importances)
assert_array_equal(X_new, X[:, feature_mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert_equal(transformer.estimator_.C, 100)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold=0.1)
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = 1.0
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
boada/astLib | astLib/astSED.py | 2 | 55763 | """module for performing calculations on Spectral Energy Distributions (SEDs)
(c) 2007-2013 Matt Hilton
U{http://astlib.sourceforge.net}
This module provides classes for manipulating SEDs, in particular the Bruzual &
Charlot 2003, Maraston 2005, and Percival et al 2009 stellar population
synthesis models are currently supported. Functions are provided for
calculating the evolution of colours and magnitudes in these models with
redshift etc., and for fitting broadband photometry using these models.
@var VEGA: The SED of Vega, used for calculation of magnitudes on the Vega system.
@type VEGA: L{SED} object
@var AB: Flat spectrum SED, used for calculation of magnitudes on the AB system.
@type AB: L{SED} object
@var SOL: The SED of the Sun.
@type SOL: L{SED} object
"""
#-----------------------------------------------------------------------------
import numpy
import math
import operator
try:
from scipy import interpolate
from scipy import ndimage
except ImportError:
print("WARNING: astSED: failed to import scipy modules - some functions "
"will not work.")
import astLib
from astLib import astCalc
import os
try:
import matplotlib
from matplotlib import pylab
matplotlib.interactive(False)
except ImportError:
print("WARNING: astSED: failed to import matplotlib - some functions will "
"not work.")
import glob
#-----------------------------------------------------------------------------
class Passband:
"""This class describes a filter transmission curve. Passband objects are
created by loading data from from text files containing wavelength in
angstroms in the first column, relative transmission efficiency in the
second column (whitespace delimited). For example, to create a Passband
object for the 2MASS J filter:
passband=astSED.Passband("J_2MASS.res")
where "J_2MASS.res" is a file in the current working directory that
describes the filter.
Wavelength units can be specified as 'angstroms', 'nanometres' or
'microns'; if either of the latter, they will be converted to angstroms.
"""
def __init__(self, fileName, normalise=True, inputUnits='angstroms'):
inFile = open(fileName, "r")
        lines = inFile.readlines()
        inFile.close()
wavelength = []
transmission = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
transmission.append(float(bits[1]))
wavelength.append(float(bits[0]))
self.wavelength = numpy.array(wavelength)
self.transmission = numpy.array(transmission)
if inputUnits == 'angstroms':
pass
elif inputUnits == 'nanometres':
self.wavelength = self.wavelength * 10.0
elif inputUnits == 'microns':
self.wavelength = self.wavelength * 10000.0
elif inputUnits == 'mm':
self.wavelength = self.wavelength * 1e7
elif inputUnits == 'GHz':
self.wavelength = 3e8 / (self.wavelength * 1e9)
self.wavelength = self.wavelength * 1e10
else:
raise Exception("didn't understand passband input units")
# Sort into ascending order of wavelength otherwise normalisation will be wrong
merged = numpy.array([self.wavelength, self.transmission]).transpose()
sortedMerged = numpy.array(sorted(merged, key=operator.itemgetter(0)))
self.wavelength = sortedMerged[:, 0]
self.transmission = sortedMerged[:, 1]
if normalise:
self.transmission = self.transmission / numpy.trapz(
self.transmission, self.wavelength)
# Store a ready-to-go interpolation object to speed calculation of fluxes up
self.interpolator = interpolate.interp1d(self.wavelength,
self.transmission,
kind='linear')
def asList(self):
"""Returns a two dimensional list of [wavelength, transmission],
suitable for plotting by gnuplot.
@rtype: list
@return: list in format [wavelength, transmission]
"""
listData = []
for l, f in zip(self.wavelength, self.transmission):
listData.append([l, f])
return listData
def rescale(self, maxTransmission):
"""Rescales the passband so that maximum value of the transmission is
equal to maxTransmission. Useful for plotting.
@type maxTransmission: float
@param maxTransmission: maximum value of rescaled transmission curve
"""
self.transmission = self.transmission * (maxTransmission /
self.transmission.max())
def plot(self, xmin='min', xmax='max', maxTransmission=None):
"""Plots the passband, rescaling the maximum of the tranmission curve
to maxTransmission if required.
@type xmin: float or 'min'
@param xmin: minimum of the wavelength range of the plot
@type xmax: float or 'max'
@param xmax: maximum of the wavelength range of the plot
@type maxTransmission: float
@param maxTransmission: maximum value of rescaled transmission curve
"""
if maxTransmission is not None:
self.rescale(maxTransmission)
pylab.matplotlib.interactive(True)
pylab.plot(self.wavelength, self.transmission)
if xmin == 'min':
xmin = self.wavelength.min()
if xmax == 'max':
xmax = self.wavelength.max()
pylab.xlim(xmin, xmax)
pylab.xlabel("Wavelength")
pylab.ylabel("Relative Flux")
def effectiveWavelength(self):
"""Calculates effective wavelength for the passband. This is the same
as equation (3) of Carter et al. 2009.
@rtype: float
@return: effective wavelength of the passband, in Angstroms
"""
a = numpy.trapz(self.transmission * self.wavelength, self.wavelength)
b = numpy.trapz(self.transmission / self.wavelength, self.wavelength)
effWavelength = numpy.sqrt(a / b)
return effWavelength
#-----------------------------------------------------------------------------
class TopHatPassband(Passband):
"""This class generates a passband with a top hat response between the
given wavelengths.
"""
def __init__(self, wavelengthMin, wavelengthMax, normalise=True):
"""Generates a passband object with top hat response between
wavelengthMin, wavelengthMax. Units are assumed to be Angstroms.
@type wavelengthMin: float
@param wavelengthMin: minimum of the wavelength range of the passband
@type wavelengthMax: float
@param wavelengthMax: maximum of the wavelength range of the passband
@type normalise: bool
@param normalise: if True, scale such that total area under the
passband over the wavelength
range is 1.
"""
self.wavelength = numpy.arange(
wavelengthMin, wavelengthMax + 10,
10, dtype=float)
self.transmission = numpy.ones(self.wavelength.shape, dtype=float)
if normalise:
self.transmission = self.transmission / numpy.trapz(
self.transmission, self.wavelength)
# Store a ready-to-go interpolation object to speed calculation of fluxes up
self.interpolator = interpolate.interp1d(self.wavelength,
self.transmission,
kind='linear')
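# Minimal usage sketch, not part of the astLib API: build a top hat passband
# spanning roughly the SDSS r-band wavelength range and report its effective
# wavelength.  The function name and wavelength limits are illustrative only.
def _exampleTopHatPassband():
    box = TopHatPassband(5500.0, 7000.0)
    return box.effectiveWavelength()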
#-----------------------------------------------------------------------------
class SED:
"""This class describes a Spectral Energy Distribution (SED).
To create a SED object, lists (or numpy arrays) of wavelength and relative
flux must be provided. The SED can optionally be redshifted. The wavelength
units of SEDs are assumed to be Angstroms - flux calculations using
Passband and SED objects specified with different wavelength units will be
incorrect.
The L{StellarPopulation} class (and derivatives) can be used to extract
SEDs for specified ages from e.g. the Bruzual & Charlot 2003 or Maraston
2005 models.
"""
def __init__(self,
wavelength=[],
flux=[],
z=0.0,
ageGyr=None,
normalise=False,
label=None):
# We keep a copy of the wavelength, flux at z = 0, as it's more robust
# to copy that to self.wavelength, flux and redshift it, rather than
# repeatedly redshifting the same arrays back and forth
self.z0wavelength = numpy.array(wavelength)
self.z0flux = numpy.array(flux)
self.wavelength = numpy.array(wavelength)
self.flux = numpy.array(flux)
self.z = z
self.label = label # plain text label, handy for using in photo-z codes
# Store the intrinsic (i.e. unextincted) flux in case we change
# extinction
self.EBMinusV = 0.0
self.intrinsic_z0flux = numpy.array(flux)
if normalise:
self.normalise()
if z != 0.0:
self.redshift(z)
self.ageGyr = ageGyr
def copy(self):
"""Copies the SED, returning a new SED object
@rtype: L{SED} object
@return: SED
"""
newSED = SED(wavelength=self.z0wavelength,
flux=self.z0flux,
z=self.z,
ageGyr=self.ageGyr,
normalise=False,
label=self.label)
return newSED
def loadFromFile(self, fileName):
"""Loads SED from a white space delimited file in the format
wavelength, flux. Lines beginning with # are ignored.
@type fileName: string
@param fileName: path to file containing wavelength, flux data
"""
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
wavelength = []
flux = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
wavelength.append(float(bits[0]))
flux.append(float(bits[1]))
# Sort SED so wavelength is in ascending order
if wavelength[0] > wavelength[-1]:
wavelength.reverse()
flux.reverse()
self.z0wavelength = numpy.array(wavelength)
self.z0flux = numpy.array(flux)
self.wavelength = numpy.array(wavelength)
self.flux = numpy.array(flux)
def writeToFile(self, fileName):
"""Writes SED to a white space delimited file in the format wavelength,
flux.
@type fileName: string
@param fileName: path to file
"""
outFile = open(fileName, "w")
for l, f in zip(self.wavelength, self.flux):
outFile.write(str(l) + " " + str(f) + "\n")
outFile.close()
def asList(self):
"""Returns a two dimensional list of [wavelength, flux], suitable for
plotting by gnuplot.
@rtype: list
@return: list in format [wavelength, flux]
"""
listData = []
for l, f in zip(self.wavelength, self.flux):
listData.append([l, f])
return listData
def plot(self, xmin='min', xmax='max'):
"""Produces a simple (wavelength, flux) plot of the SED.
@type xmin: float or 'min'
@param xmin: minimum of the wavelength range of the plot
@type xmax: float or 'max'
@param xmax: maximum of the wavelength range of the plot
"""
pylab.matplotlib.interactive(True)
pylab.plot(self.wavelength, self.flux)
if xmin == 'min':
xmin = self.wavelength.min()
if xmax == 'max':
xmax = self.wavelength.max()
# Sensible y scale
plotMask = numpy.logical_and(
numpy.greater(self.wavelength, xmin), numpy.less(self.wavelength,
xmax))
plotMax = self.flux[plotMask].max()
pylab.ylim(0, plotMax * 1.1)
pylab.xlim(xmin, xmax)
pylab.xlabel("Wavelength")
pylab.ylabel("Relative Flux")
def integrate(self, wavelengthMin='min', wavelengthMax='max'):
"""Calculates flux in SED within given wavelength range.
@type wavelengthMin: float or 'min'
@param wavelengthMin: minimum of the wavelength range
@type wavelengthMax: float or 'max'
@param wavelengthMax: maximum of the wavelength range
@rtype: float
@return: relative flux
"""
if wavelengthMin == 'min':
wavelengthMin = self.wavelength.min()
if wavelengthMax == 'max':
wavelengthMax = self.wavelength.max()
mask = numpy.logical_and(numpy.greater(self.wavelength, wavelengthMin),
numpy.less(self.wavelength, wavelengthMax))
flux = numpy.trapz(self.flux[mask], self.wavelength[mask])
return flux
def smooth(self, smoothPix):
"""Smooths SED.flux with a uniform (boxcar) filter of width smoothPix.
Cannot be undone.
@type smoothPix: int
@param smoothPix: size of uniform filter applied to SED, in pixels
"""
smoothed = ndimage.uniform_filter1d(self.flux, smoothPix)
self.flux = smoothed
def redshift(self, z):
"""Redshifts the SED to redshift z.
@type z: float
@param z: redshift
"""
# We have to conserve energy so the area under the redshifted SED has
# to be equal to the area under the unredshifted SED, otherwise
# magnitude calculations will be wrong when comparing SEDs at different
# zs
self.wavelength = numpy.zeros(self.z0wavelength.shape[0])
self.flux = numpy.zeros(self.z0flux.shape[0])
self.wavelength = self.wavelength + self.z0wavelength
self.flux = self.flux + self.z0flux
        z0TotalFlux = numpy.trapz(self.z0flux, self.z0wavelength)
        self.wavelength = self.wavelength * (1.0 + z)
        zTotalFlux = numpy.trapz(self.flux, self.wavelength)
self.flux = self.flux * (z0TotalFlux / zTotalFlux)
self.z = z
def normalise(self, minWavelength='min', maxWavelength='max'):
"""Normalises the SED such that the area under the specified wavelength
range is equal to 1.
@type minWavelength: float or 'min'
@param minWavelength: minimum wavelength of range over which to
normalise SED
@type maxWavelength: float or 'max'
@param maxWavelength: maximum wavelength of range over which to
normalise SED
"""
if minWavelength == 'min':
minWavelength = self.wavelength.min()
if maxWavelength == 'max':
maxWavelength = self.wavelength.max()
lowCut = numpy.greater(self.wavelength, minWavelength)
highCut = numpy.less(self.wavelength, maxWavelength)
totalCut = numpy.logical_and(lowCut, highCut)
sedFluxSlice = self.flux[totalCut]
sedWavelengthSlice = self.wavelength[totalCut]
self.flux = self.flux / numpy.trapz(
abs(sedFluxSlice), sedWavelengthSlice) # self.wavelength)
def normaliseToMag(self, ABMag, passband):
"""Normalises the SED to match the flux equivalent to the given AB
magnitude in the given passband.
@type ABMag: float
@param ABMag: AB magnitude to which the SED is to be normalised at the
given passband
@type passband: an L{Passband} object
@param passband: passband at which normalisation to AB magnitude is
calculated
"""
magFlux = mag2Flux(ABMag, 0.0, passband)
sedFlux = self.calcFlux(passband)
norm = magFlux[0] / sedFlux
self.flux = self.flux * norm
self.z0flux = self.z0flux * norm
def matchFlux(self, matchSED, minWavelength, maxWavelength):
"""Matches the flux in the wavelength range given by minWavelength,
maxWavelength to the flux in the same region in matchSED. Useful for
plotting purposes.
@type matchSED: L{SED} object
@param matchSED: SED to match flux to
@type minWavelength: float
@param minWavelength: minimum of range in which to match flux of
current SED to matchSED
@type maxWavelength: float
@param maxWavelength: maximum of range in which to match flux of
current SED to matchSED
"""
interpMatch = interpolate.interp1d(matchSED.wavelength,
matchSED.flux,
kind='linear')
interpSelf = interpolate.interp1d(self.wavelength,
self.flux,
kind='linear')
wavelengthRange = numpy.arange(minWavelength, maxWavelength, 5.0)
matchFlux = numpy.trapz(interpMatch(wavelengthRange), wavelengthRange)
selfFlux = numpy.trapz(interpSelf(wavelengthRange), wavelengthRange)
self.flux = self.flux * (matchFlux / selfFlux)
def calcFlux(self, passband):
"""Calculates flux in the given passband.
@type passband: L{Passband} object
@param passband: filter passband through which to calculate the flux
from the SED
@rtype: float
@return: flux
"""
lowCut = numpy.greater(self.wavelength, passband.wavelength.min())
highCut = numpy.less(self.wavelength, passband.wavelength.max())
totalCut = numpy.logical_and(lowCut, highCut)
sedFluxSlice = self.flux[totalCut]
sedWavelengthSlice = self.wavelength[totalCut]
# Use linear interpolation to rebin the passband to the same dimensions as the
# part of the SED we're interested in
sedInBand = passband.interpolator(sedWavelengthSlice) * sedFluxSlice
totalFlux = numpy.trapz(sedInBand * sedWavelengthSlice,
sedWavelengthSlice)
totalFlux = totalFlux /\
numpy.trapz(passband.interpolator(sedWavelengthSlice) *
sedWavelengthSlice, sedWavelengthSlice)
return totalFlux
def calcMag(self, passband, addDistanceModulus=True, magType="Vega"):
"""Calculates magnitude in the given passband. If addDistanceModulus ==
        True, then the distance modulus (5.0*log10(dl*1e5), where dl is the
luminosity distance in Mpc at the redshift of the L{SED}) is added.
@type passband: L{Passband} object
@param passband: filter passband through which to calculate the
magnitude from the SED
@type addDistanceModulus: bool
        @param addDistanceModulus: if True, adds 5.0*log10(dl*1e5) to the mag
returned, where dl is the luminosity distance (Mpc) corresponding to
the SED z
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: float
@return: magnitude through the given passband on the specified
magnitude system
"""
f1 = self.calcFlux(passband)
if magType == "Vega":
f2 = VEGA.calcFlux(passband)
elif magType == "AB":
f2 = AB.calcFlux(passband)
mag = -2.5 * math.log10(f1 / f2)
if magType == "Vega":
# Add 0.026 because Vega has V=0.026 (e.g. Bohlin & Gilliland 2004)
mag += 0.026
if self.z > 0.0 and addDistanceModulus:
appMag = 5.0 * math.log10(astCalc.dl(self.z) * 1e5) + mag
else:
appMag = mag
return appMag
def calcColour(self, passband1, passband2, magType="Vega"):
"""Calculates the colour passband1-passband2.
@type passband1: L{Passband} object
@param passband1: filter passband through which to calculate the first
magnitude
@type passband2: L{Passband} object
        @param passband2: filter passband through which to calculate the second
magnitude
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: float
@return: colour defined by passband1 - passband2 on the specified
magnitude system
"""
mag1 = self.calcMag(passband1,
magType=magType,
addDistanceModulus=True)
mag2 = self.calcMag(passband2,
magType=magType,
addDistanceModulus=True)
colour = mag1 - mag2
return colour
def getSEDDict(self, passbands):
"""This is a convenience function for pulling out fluxes from a SED for
a given set of passbands
in the same format as made by L{mags2SEDDict} - designed to make
fitting code simpler.
@type passbands: list of L{Passband} objects
@param passbands: list of passbands through which fluxes will be
calculated
"""
flux = []
wavelength = []
for p in passbands:
flux.append(self.calcFlux(p))
wavelength.append(p.effectiveWavelength())
SEDDict = {}
SEDDict['flux'] = numpy.array(flux)
SEDDict['wavelength'] = numpy.array(wavelength)
return SEDDict
def extinctionCalzetti(self, EBMinusV):
"""Applies the Calzetti et al. 2000 (ApJ, 533, 682) extinction law to
the SED with the given E(B-V) amount of extinction. R_v' = 4.05 is
assumed (see equation (5) of Calzetti et al.).
@type EBMinusV: float
@param EBMinusV: extinction E(B-V), in magnitudes
"""
self.EBMinusV = EBMinusV
# All done in rest frame
self.z0flux = self.intrinsic_z0flux
# Allow us to set EBMinusV == 0 to turn extinction off
if EBMinusV > 0:
# Note that EBMinusV is assumed to be Es as in equations (2) - (5)
# Note here wavelength units have to be microns for constants to
# make sense
RvPrime = 4.05 # equation (5) of Calzetti et al. 2000
shortWavelengthMask =\
numpy.logical_and(numpy.greater_equal(self.z0wavelength, 1200),
numpy.less(self.z0wavelength, 6300))
longWavelengthMask =\
numpy.logical_and(numpy.greater_equal(self.z0wavelength, 6300),
numpy.less_equal(self.z0wavelength, 22000))
wavelengthMicrons = numpy.array(self.z0wavelength / 10000.0,
dtype=numpy.float64)
kPrime = numpy.zeros(self.z0wavelength.shape[0],
dtype=numpy.float64)
kPrimeLong = (2.659 * (-1.857 + 1.040 / wavelengthMicrons)) +\
RvPrime
kPrimeShort = (2.659 * (-2.156 + 1.509 / wavelengthMicrons -
0.198 / wavelengthMicrons**2 + 0.011 /
wavelengthMicrons**3)) + RvPrime
kPrime[longWavelengthMask] = kPrimeLong[longWavelengthMask]
kPrime[shortWavelengthMask] = kPrimeShort[shortWavelengthMask]
# Here we extrapolate kPrime in similar way to what HYPERZ does
# Short wavelengths
try:
interpolator = interpolate.interp1d(self.z0wavelength,
kPrimeShort,
kind='linear')
slope = (interpolator(1100.0) - interpolator(1200.0)) / (
1100.0 - 1200.0)
intercept = interpolator(1200.0) - (slope * 1200.0)
mask = numpy.less(self.z0wavelength, 1200.0)
kPrime[mask] = slope * self.z0wavelength[mask] + intercept
# Long wavelengths
interpolator = interpolate.interp1d(self.z0wavelength,
kPrimeLong,
kind='linear')
slope = (interpolator(21900.0) - interpolator(22000.0)) / (
21900.0 - 22000.0)
intercept = interpolator(21900.0) - (slope * 21900.0)
mask = numpy.greater(self.z0wavelength, 22000.0)
kPrime[mask] = slope * self.z0wavelength[mask] + intercept
except:
raise Exception("This SED has a wavelength range that doesn't "
"cover ~1200-22000 Angstroms")
# Never let go negative
kPrime[numpy.less_equal(kPrime, 0.0)] = 1e-6
reddening = numpy.power(10, 0.4 * EBMinusV * kPrime)
self.z0flux = self.z0flux / reddening
self.redshift(self.z)
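# Minimal usage sketch, not part of the astLib API: build a flat SED, redshift
# it, and measure an AB magnitude through a top hat passband.  Relies on the
# module-level AB reference SED described in the module docstring; the function
# name and numbers are illustrative only.
def _exampleSEDMagnitude():
    wav = numpy.arange(1000.0, 30000.0, 10.0)
    flatSED = SED(wavelength=wav, flux=numpy.ones(wav.shape))
    flatSED.redshift(0.5)
    box = TopHatPassband(5500.0, 7000.0)
    return flatSED.calcMag(box, addDistanceModulus=True, magType="AB")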
#-----------------------------------------------------------------------------
class VegaSED(SED):
"""This class stores the SED of Vega, used for calculation of magnitudes on the Vega system.
The Vega SED used is taken from Bohlin 2007
(http://adsabs.harvard.edu/abs/2007ASPC..364..315B), and is available from
the STScI CALSPEC library
(http://www.stsci.edu/hst/observatory/cdbs/calspec.html).
"""
def __init__(self, normalise=False):
VEGA_SED_PATH = astLib.__path__[
0] + os.path.sep + "data" + os.path.sep + "bohlin2006_Vega.sed" # from HST CALSPEC
inFile = open(VEGA_SED_PATH, "r")
        lines = inFile.readlines()
        inFile.close()
wavelength = []
flux = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
flux.append(float(bits[1]))
wavelength.append(float(bits[0]))
self.wavelength = numpy.array(wavelength)
self.flux = numpy.array(flux, dtype=numpy.float64)
# We may want to redshift reference SEDs to calculate rest-frame colors
# from SEDs at different zs
self.z0wavelength = numpy.array(wavelength)
self.z0flux = numpy.array(flux, dtype=numpy.float64)
self.z = 0.0
#if normalise == True:
#self.flux=self.flux/numpy.trapz(self.flux, self.wavelength)
#self.z0flux=self.z0flux/numpy.trapz(self.z0flux, self.z0wavelength)
#-----------------------------------------------------------------------------
class StellarPopulation:
"""This class describes a stellar population model, either a Simple Stellar
Population (SSP) or a Composite Stellar Population (CSP), such as the
models of Bruzual & Charlot 2003 or Maraston 2005.
The constructor for this class can be used for generic SSPs or CSPs stored
in white space delimited text files, containing columns for age,
wavelength, and flux. Columns are counted from 0 ... n. Lines starting
with # are ignored.
The classes L{M05Model} (for Maraston 2005 models), L{BC03Model} (for
Bruzual & Charlot 2003 models), and L{P09Model} (for Percival et al. 2009
models) are derived from this class. The only difference between them is
the code used to load in the model data.
"""
def __init__(self,
fileName,
ageColumn=0,
wavelengthColumn=1,
fluxColumn=2):
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
self.fileName = fileName
# Extract a list of model ages and valid wavelengths from the file
self.ages = []
self.wavelengths = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
age = float(bits[ageColumn])
wavelength = float(bits[wavelengthColumn])
if age not in self.ages:
self.ages.append(age)
if wavelength not in self.wavelengths:
self.wavelengths.append(wavelength)
# Construct a grid of flux - rows correspond to each wavelength, columns to age
self.fluxGrid = numpy.zeros([len(self.ages), len(self.wavelengths)])
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
sedAge = float(bits[ageColumn])
sedWavelength = float(bits[wavelengthColumn])
sedFlux = float(bits[fluxColumn])
row = self.ages.index(sedAge)
column = self.wavelengths.index(sedWavelength)
self.fluxGrid[row][column] = sedFlux
def getSED(self, ageGyr, z=0.0, normalise=False, label=None):
"""Extract a SED for given age. Do linear interpolation between models
if necessary.
@type ageGyr: float
@param ageGyr: age of the SED in Gyr
@type z: float
@param z: redshift the SED from z = 0 to z = z
@type normalise: bool
@param normalise: normalise the SED to have area 1
@rtype: L{SED} object
@return: SED
"""
if ageGyr in self.ages:
flux = self.fluxGrid[self.ages.index(ageGyr)]
sed = SED(self.wavelengths,
flux,
z=z,
normalise=normalise,
label=label)
return sed
else:
# Use interpolation, iterating over each wavelength column
flux = []
for i in range(len(self.wavelengths)):
interpolator = interpolate.interp1d(self.ages,
self.fluxGrid[:, i],
kind='linear')
sedFlux = interpolator(ageGyr)
flux.append(sedFlux)
sed = SED(self.wavelengths,
flux,
z=z,
normalise=normalise,
label=label)
return sed
def getColourEvolution(self,
passband1,
passband2,
zFormation,
zStepSize=0.05,
magType="Vega"):
"""Calculates the evolution of the colour observed through passband1 -
passband2 for the StellarPopulation with redshift, from z = 0 to z =
zFormation.
@type passband1: L{Passband} object
@param passband1: filter passband through which to calculate the first
magnitude
@type passband2: L{Passband} object
@param passband2: filter passband through which to calculate the second
magnitude
@type zFormation: float
@param zFormation: formation redshift of the StellarPopulation
@type zStepSize: float
@param zStepSize: size of interval in z at which to calculate model
colours
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: dictionary
@return: dictionary of numpy.arrays in format {'z', 'colour'}
"""
zSteps = int(math.ceil(zFormation / zStepSize))
zData = []
colourData = []
for i in range(1, zSteps):
zc = i * zStepSize
age = astCalc.tl(zFormation) - astCalc.tl(zc)
sed = self.getSED(age, z=zc)
colour = sed.calcColour(passband1, passband2, magType=magType)
zData.append(zc)
colourData.append(colour)
zData = numpy.array(zData)
colourData = numpy.array(colourData)
return {'z': zData, 'colour': colourData}
def getMagEvolution(self,
passband,
magNormalisation,
zNormalisation,
zFormation,
zStepSize=0.05,
onePlusZSteps=False,
magType="Vega"):
"""Calculates the evolution with redshift (from z = 0 to z =
zFormation) of apparent magnitude in the observed frame through the
passband for the StellarPopulation, normalised to magNormalisation
(apparent) at z = zNormalisation.
@type passband: L{Passband} object
@param passband: filter passband through which to calculate the
magnitude
@type magNormalisation: float
@param magNormalisation: sets the apparent magnitude of the SED at
zNormalisation
@type zNormalisation: float
@param zNormalisation: the redshift at which the magnitude
normalisation is carried out
@type zFormation: float
@param zFormation: formation redshift of the StellarPopulation
@type zStepSize: float
@param zStepSize: size of interval in z at which to calculate model
magnitudes
@type onePlusZSteps: bool
@param onePlusZSteps: if True, zSteps are (1+z)*zStepSize, otherwise
zSteps are linear
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: dictionary
@return: dictionary of numpy.arrays in format {'z', 'mag'}
"""
# Count upwards in z steps as interpolation doesn't work if array ordered z decreasing
zSteps = int(math.ceil(zFormation / zStepSize))
zData = []
magData = []
absMagData = []
zc0 = 0.0
for i in range(1, zSteps):
if not onePlusZSteps:
zc = i * zStepSize
else:
zc = zc0 + (1 + zc0) * zStepSize
zc0 = zc
if zc >= zFormation:
break
age = astCalc.tl(zFormation) - astCalc.tl(zc)
sed = self.getSED(age, z=zc)
mag = sed.calcMag(passband,
magType=magType,
addDistanceModulus=True)
zData.append(zc)
magData.append(mag)
absMagData.append(sed.calcMag(passband, addDistanceModulus=False))
zData = numpy.array(zData)
magData = numpy.array(magData)
# Do the normalisation
interpolator = interpolate.interp1d(zData, magData, kind='linear')
modelNormMag = interpolator(zNormalisation)
normConstant = magNormalisation - modelNormMag
magData = magData + normConstant
return {'z': zData, 'mag': magData}
def calcEvolutionCorrection(self,
zFrom,
zTo,
zFormation,
passband,
magType="Vega"):
"""Calculates the evolution correction in magnitudes in the rest frame
through the passband from redshift zFrom to redshift zTo, where the
stellarPopulation is assumed to be formed at redshift zFormation.
@type zFrom: float
        @param zFrom: redshift to evolution correct from
@type zTo: float
@param zTo: redshift to evolution correct to
@type zFormation: float
@param zFormation: formation redshift of the StellarPopulation
@type passband: L{Passband} object
@param passband: filter passband through which to calculate magnitude
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: float
@return: evolution correction in magnitudes in the rest frame
"""
ageFrom = astCalc.tl(zFormation) - astCalc.tl(zFrom)
ageTo = astCalc.tl(zFormation) - astCalc.tl(zTo)
fromSED = self.getSED(ageFrom)
toSED = self.getSED(ageTo)
fromMag = fromSED.calcMag(passband,
magType=magType,
addDistanceModulus=False)
toMag = toSED.calcMag(passband,
magType=magType,
addDistanceModulus=False)
return fromMag - toMag
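# Minimal usage sketch, not part of the astLib API: track a colour as a
# function of redshift for a generic stellar population model.  The model and
# passband file names below are placeholders for files on the user's system,
# and the formation redshift is illustrative only.
def _exampleColourEvolution():
    model = StellarPopulation("genericModel.sed")   # hypothetical model file
    g = Passband("g_SDSS.res")                      # hypothetical passband file
    r = Passband("r_SDSS.res")                      # hypothetical passband file
    return model.getColourEvolution(g, r, zFormation=3.0, magType="AB")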
#-----------------------------------------------------------------------------
class M05Model(StellarPopulation):
"""This class describes a Maraston 2005 stellar population model. To load a
composite stellar population model (CSP) for a tau = 0.1 Gyr burst of star
formation, solar metallicity, Salpeter IMF:
m05csp = astSED.M05Model(M05_DIR+"/csp_e_0.10_z02_salp.sed_agb")
where M05_DIR is set to point to the directory where the Maraston 2005
models are stored on your system.
    The file format of the Maraston 2005 simple stellar population (SSP) models
is different to the file format used for the CSPs, and this needs to be
specified using the fileType parameter. To load a SSP with solar
metallicity, red horizontal branch morphology:
m05ssp = astSED.M05Model(M05_DIR+"/sed.ssz002.rhb", fileType = "ssp")
The wavelength units of SEDs from M05 models are Angstroms, with flux in
units of erg/s/Angstrom.
"""
def __init__(self, fileName, fileType="csp"):
self.modelFamily = "M05"
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
self.fileName = fileName
if fileType == "csp":
ageColumn = 0
wavelengthColumn = 1
fluxColumn = 2
elif fileType == "ssp":
ageColumn = 0
wavelengthColumn = 2
fluxColumn = 3
else:
raise Exception("fileType must be 'ssp' or 'csp'")
# Extract a list of model ages and valid wavelengths from the file
self.ages = []
self.wavelengths = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
age = float(bits[ageColumn])
wavelength = float(bits[wavelengthColumn])
if age not in self.ages:
self.ages.append(age)
if wavelength not in self.wavelengths:
self.wavelengths.append(wavelength)
# Construct a grid of flux - rows correspond to each wavelength, columns to age
self.fluxGrid = numpy.zeros([len(self.ages), len(self.wavelengths)])
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
sedAge = float(bits[ageColumn])
sedWavelength = float(bits[wavelengthColumn])
sedFlux = float(bits[fluxColumn])
row = self.ages.index(sedAge)
column = self.wavelengths.index(sedWavelength)
self.fluxGrid[row][column] = sedFlux
#-----------------------------------------------------------------------------
class BC03Model(StellarPopulation):
"""This class describes a Bruzual & Charlot 2003 stellar population model,
extracted from a GALAXEV .ised file using the galaxevpl program that is
included in GALAXEV. The file format is white space delimited, with
wavelength in the first column. Subsequent columns contain the model fluxes
for SEDs of different ages, as specified when running galaxevpl. The age
corresponding to each flux column is taken from the comment line beginning
"# Age (yr)", and is converted to Gyr.
For example, to load a tau = 0.1 Gyr burst of star formation, solar
metallicity, Salpeter IMF model stored in a file (created by galaxevpl)
called "csp_lr_solar_0p1Gyr.136":
bc03model = BC03Model("csp_lr_solar_0p1Gyr.136")
The wavelength units of SEDs from BC03 models are Angstroms. Flux is
converted into units of erg/s/Angstrom (the units in the files output by
galaxevpl are LSun/Angstrom).
"""
def __init__(self, fileName):
self.modelFamily = "BC03"
self.fileName = fileName
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
# Extract a list of model ages - BC03 ages are in years, so convert to Gyr
self.ages = []
for line in lines:
if line.find("# Age (yr)") != -1:
rawAges = line[line.find("# Age (yr)") + 10:].split()
for age in rawAges:
self.ages.append(float(age) / 1e9)
# Extract a list of valid wavelengths from the file
# If we have many ages in the file, this is more complicated...
lambdaLinesCount = 0
startFluxDataLine = None
for i in range(len(lines)):
line = lines[i]
if "# Lambda(A)" in line:
lambdaLinesCount = lambdaLinesCount + 1
if line[0] != "#" and len(line) > 3 and startFluxDataLine is None:
startFluxDataLine = i
self.wavelengths = []
for i in range(startFluxDataLine, len(lines), lambdaLinesCount):
line = lines[i]
bits = line.split()
self.wavelengths.append(float(bits[0]))
# Construct a grid of flux - rows correspond to each wavelength, columns to age
self.fluxGrid = numpy.zeros([len(self.ages), len(self.wavelengths)])
for i in range(startFluxDataLine, len(lines), lambdaLinesCount):
line = lines[i]
bits = []
for k in range(i, i + lambdaLinesCount):
bits = bits + lines[k].split()
ageFluxes = bits[1:]
sedWavelength = float(bits[0])
column = self.wavelengths.index(sedWavelength)
for row in range(len(ageFluxes)):
sedFlux = float(ageFluxes[row])
self.fluxGrid[row][column] = sedFlux
# Convert flux into erg/s/Angstrom - native units of galaxevpl files are LSun/Angstrom
self.fluxGrid = self.fluxGrid * 3.826e33
#-----------------------------------------------------------------------------
class P09Model(StellarPopulation):
"""This class describes a Percival et al 2009 (BaSTI;
http://albione.oa-teramo.inaf.it) stellar population model. We assume that
the synthetic spectra for each model are unpacked under the directory
pointed to by fileName.
The wavelength units of SEDs from P09 models are converted to Angstroms.
Flux is converted into units of erg/s/Angstrom (the units in the BaSTI
low-res spectra are 4.3607e-33 erg/s/m).
"""
def __init__(self, fileName):
self.modelFamily = "P09"
files = glob.glob(fileName + os.path.sep + "*.t??????")
self.fileName = fileName
# Map end of filenames to ages in Gyr
extensionAgeMap = {}
self.ages = []
for f in files:
ext = f.split(".")[-1]
ageGyr = float(f[-5:]) / 1e3
self.ages.append(ageGyr)
extensionAgeMap[ext] = ageGyr
self.ages.sort()
# Construct a grid of flux - rows correspond to each wavelength, columns to age
self.wavelengths = None
self.fluxGrid = None
for i in range(len(self.ages)):
for e in extensionAgeMap.keys():
if extensionAgeMap[e] == self.ages[i]:
inFileName = glob.glob(fileName + os.path.sep + "*." + e)[
0]
inFile = open(inFileName, "r")
lines = inFile.readlines()
inFile.close()
wavelength = []
flux = []
for line in lines:
bits = line.split()
wavelength.append(
float(bits[0]) *
10.0) # units in file are nm, not angstroms
flux.append(float(bits[1]))
if self.wavelengths is None:
self.wavelengths = wavelength
if self.fluxGrid is None:
self.fluxGrid = numpy.zeros(
[len(self.ages), len(self.wavelengths)])
self.fluxGrid[i] = flux
# Convert flux into erg/s/Angstrom - native units in BaSTI files are
# 4.3607e-33 erg/s/m
self.fluxGrid = self.fluxGrid / 4.3607e-33 / 1e10
#-----------------------------------------------------------------------------
def makeModelSEDDictList(modelList,
z,
passbandsList,
labelsList=[],
EBMinusVList=[0.0],
forceYoungerThanUniverse=True):
"""This routine makes a list of SEDDict dictionaries (see L{mags2SEDDict})
for fitting using L{fitSEDDict}. This speeds up the fitting as this allows
us to calculate model SED magnitudes only once, if all objects to be fitted
are at the same redshift. We add some meta data to the modelSEDDicts (e.g.
the model file names).
The effect of extinction by dust (assuming the Calzetti et al. 2000 law)
can be included by giving a list of E(B-V) values.
If forceYoungerThanUniverse == True, ages which are older than the universe
at the given z will not be included.
@type modelList: list of L{StellarPopulation} model objects
@param modelList: list of StellarPopulation models to include
@type z: float
@param z: redshift to apply to all stellar population models in modelList
@type EBMinusVList: list
@param EBMinusVList: list of E(B-V) extinction values to apply to all
models, in magnitudes
@type labelsList: list
@param labelsList: optional list used for labelling passbands in output
SEDDicts
@type forceYoungerThanUniverse: bool
@param forceYoungerThanUniverse: if True, do not allow models that exceed
the age of the universe at z
@rtype: list
@return: list of dictionaries containing model fluxes, to be used as input
to L{fitSEDDict}.
"""
    # If EBMinusVList is empty we would not make any model SEDDicts at all,
    # so default to zero extinction.
if EBMinusVList == []:
EBMinusVList = [0.0]
modelSEDDictList = []
for m in range(len(modelList)):
testAges = numpy.array(modelList[m].ages)
if forceYoungerThanUniverse:
testAges = testAges[numpy.logical_and(
numpy.less(testAges, astCalc.tz(z)), numpy.greater(testAges,
0))]
for t in testAges:
s = modelList[m].getSED(
t,
z=z,
label=modelList[m].fileName + " - age=" + str(t) + " Gyr")
for EBMinusV in EBMinusVList:
try:
s.extinctionCalzetti(EBMinusV)
except:
raise Exception(
"Model %s has a wavelength range that doesn't cover ~1200-22000 Angstroms"
% (modelList[m].fileName))
modelSEDDict = s.getSEDDict(passbandsList)
modelSEDDict['labels'] = labelsList
modelSEDDict['E(B-V)'] = EBMinusV
modelSEDDict['ageGyr'] = t
modelSEDDict['z'] = z
modelSEDDict['fileName'] = modelList[m].fileName
modelSEDDict['modelListIndex'] = m
modelSEDDictList.append(modelSEDDict)
return modelSEDDictList
#-----------------------------------------------------------------------------
def fitSEDDict(SEDDict, modelSEDDictList):
"""Fits the given SED dictionary (made using L{mags2SEDDict}) with the
given list of model SED dictionaries. The latter should be made using
L{makeModelSEDDictList}, and entries for fluxes should correspond directly
between the model and SEDDict.
Returns a dictionary with best fit values.
@type SEDDict: dictionary, in format of L{mags2SEDDict}
@param SEDDict: dictionary of observed fluxes and uncertainties, in format
of L{mags2SEDDict}
@type modelSEDDictList: list of dictionaries, in format of
L{makeModelSEDDictList}
@param modelSEDDictList: list of dictionaries containing fluxes of models
to be fitted to the observed fluxes listed in the SEDDict. This should be
made using L{makeModelSEDDictList}.
@rtype: dictionary
@return: results of the fitting - keys:
- 'minChiSq': minimum chi squared value of best fit
- 'chiSqContrib': corresponding contribution at each passband to
the minimum chi squared value
- 'ageGyr': the age in Gyr of the best fitting model
- 'modelFileName': the file name of the stellar population model
corresponding to the best fit
- 'modelListIndex': the index of the best fitting model in the
input modelSEDDictList
- 'norm': the normalisation that the best fit model should be
multiplied by to match the SEDDict
- 'z': the redshift of the best fit model
- 'E(B-V)': the extinction, E(B-V), in magnitudes, of the best fit
model
"""
modelFlux = []
for modelSEDDict in modelSEDDictList:
modelFlux.append(modelSEDDict['flux'])
modelFlux = numpy.array(modelFlux)
sedFlux = numpy.array([SEDDict['flux']] * len(modelSEDDictList))
sedFluxErr = numpy.array([SEDDict['fluxErr']] * len(modelSEDDictList))
# Analytic expression below is for normalisation at minimum chi squared (see note book)
norm = numpy.sum(
(modelFlux * sedFlux) /
(sedFluxErr**2), axis=1) / numpy.sum(modelFlux**2 / sedFluxErr**2,
axis=1)
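    # i.e. norm = sum(model*obs/err^2) / sum(model^2/err^2) per model, which is
    # the normalisation that minimises chi squared for that model.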
norms = numpy.array([norm] * modelFlux.shape[1]).transpose()
chiSq = numpy.sum(((sedFlux - norms * modelFlux)**2) / sedFluxErr**2,
axis=1)
chiSq[numpy.isnan(
chiSq)] = 1e6 # throw these out, should check this out and handle more gracefully
minChiSq = chiSq.min()
bestMatchIndex = numpy.equal(chiSq, minChiSq).nonzero()[0][0]
bestNorm = norm[bestMatchIndex]
bestChiSq = minChiSq
bestChiSqContrib = ((sedFlux[bestMatchIndex] - norms[bestMatchIndex] *
modelFlux[bestMatchIndex])**2) /\
sedFluxErr[bestMatchIndex]**2
resultsDict = {'minChiSq': bestChiSq,
'chiSqContrib': bestChiSqContrib,
'allChiSqValues': chiSq,
'ageGyr': modelSEDDictList[bestMatchIndex]['ageGyr'],
'modelFileName':
modelSEDDictList[bestMatchIndex]['fileName'],
'modelListIndex':
modelSEDDictList[bestMatchIndex]['modelListIndex'],
'norm': bestNorm,
'z': modelSEDDictList[bestMatchIndex]['z'],
'E(B-V)': modelSEDDictList[bestMatchIndex]['E(B-V)']}
return resultsDict
#-----------------------------------------------------------------------------
def mags2SEDDict(ABMags, ABMagErrs, passbands):
"""Takes a set of corresponding AB magnitudes, uncertainties, and
passbands, and returns a dictionary with keys 'flux', 'fluxErr'
'wavelength'. Fluxes are in units of erg/s/cm^2/Angstrom, wavelength in
Angstroms. These dictionaries are the staple diet of the L{fitSEDDict}
routine.
@type ABMags: list or numpy array
@param ABMags: AB magnitudes, specified in corresponding order to passbands
and ABMagErrs
@type ABMagErrs: list or numpy array
@param ABMagErrs: AB magnitude errors, specified in corresponding order to
passbands and ABMags
@type passbands: list of L{Passband} objects
@param passbands: passband objects, specified in corresponding order to
ABMags and ABMagErrs
@rtype: dictionary
@return: dictionary with keys {'flux', 'fluxErr', 'wavelength'}, suitable
for input to L{fitSEDDict}
"""
flux = []
fluxErr = []
wavelength = []
for m, e, p in zip(ABMags, ABMagErrs, passbands):
f, err = mag2Flux(m, e, p)
flux.append(f)
fluxErr.append(err)
wavelength.append(p.effectiveWavelength())
SEDDict = {}
SEDDict['flux'] = numpy.array(flux)
SEDDict['fluxErr'] = numpy.array(fluxErr)
SEDDict['wavelength'] = numpy.array(wavelength)
return SEDDict
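# Example (sketch, continuing from the makeModelSEDDictList example above; the
# magnitudes and errors are illustrative only):
#   obsSED = mags2SEDDict([22.1, 21.4], [0.10, 0.08], bands)
#   result = fitSEDDict(obsSED, modelSEDDicts)
#   print(result['ageGyr'], result['minChiSq'], result['norm'])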
#-----------------------------------------------------------------------------
def mag2Flux(ABMag, ABMagErr, passband):
"""Converts given AB magnitude and uncertainty into flux, in
erg/s/cm^2/Angstrom.
@type ABMag: float
@param ABMag: magnitude on AB system in passband
@type ABMagErr: float
@param ABMagErr: uncertainty in AB magnitude in passband
@type passband: L{Passband} object
@param passband: L{Passband} object at which ABMag was measured
@rtype: list
@return: [flux, fluxError], in units of erg/s/cm^2/Angstrom
"""
fluxJy = (10**23.0) * 10**(-(ABMag + 48.6) / 2.5) # AB mag
aLambda = 3e-13 # for conversion to erg s-1 cm-2 angstrom-1 with lambda in microns
effLMicron = passband.effectiveWavelength() * (1e-10 / 1e-6)
fluxWLUnits = aLambda * fluxJy / effLMicron**2
fluxJyErr = (10**23.0) * 10**(-(ABMag - ABMagErr + 48.6) / 2.5) # AB mag
fluxWLUnitsErr = aLambda * fluxJyErr / effLMicron**2
fluxWLUnitsErr = fluxWLUnitsErr - fluxWLUnits
return [fluxWLUnits, fluxWLUnitsErr]
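# Example: AB magnitude 0.0 corresponds to 3631 Jy, so for a passband with an
# effective wavelength near 5500 Angstroms mag2Flux(0.0, 0.1, passband) returns
# roughly 3.6e-9 erg/s/cm^2/Angstrom (the exact value depends on the passband).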
#-----------------------------------------------------------------------------
def flux2Mag(flux, fluxErr, passband):
"""Converts given flux and uncertainty in erg/s/cm^2/Angstrom into AB
magnitudes.
@type flux: float
@param flux: flux in erg/s/cm^2/Angstrom in passband
@type fluxErr: float
@param fluxErr: uncertainty in flux in passband, in erg/s/cm^2/Angstrom
@type passband: L{Passband} object
@param passband: L{Passband} object at which ABMag was measured
@rtype: list
@return: [ABMag, ABMagError], in AB magnitudes
"""
# aLambda = 3x10-5 for effective wavelength in angstroms
aLambda = 3e-13 # for conversion to erg s-1 cm-2 angstrom-1 with lambda in microns
effLMicron = passband.effectiveWavelength() * (1e-10 / 1e-6)
fluxJy = (flux * effLMicron**2) / aLambda
mag = -2.5 * numpy.log10(fluxJy / 10**23) - 48.6
fluxErrJy = (fluxErr * effLMicron**2) / aLambda
magErr = mag - (-2.5 * numpy.log10((fluxJy + fluxErrJy) / 10**23) - 48.6)
return [mag, magErr]
#-----------------------------------------------------------------------------
def mag2Jy(ABMag):
"""Converts an AB magnitude into flux density in Jy
@type ABMag: float
@param ABMag: AB magnitude
@rtype: float
@return: flux density in Jy
"""
fluxJy = ((10**23) * 10**(-(float(ABMag) + 48.6) / 2.5))
return fluxJy
#-----------------------------------------------------------------------------
def Jy2Mag(fluxJy):
"""Converts flux density in Jy into AB magnitude
@type fluxJy: float
@param fluxJy: flux density in Jy
@rtype: float
@return: AB magnitude
"""
ABMag = -2.5 * (numpy.log10(fluxJy) - 23.0) - 48.6
return ABMag
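# Example: mag2Jy(0.0) ~= 3631 Jy and Jy2Mag(3631.0) ~= 0.0, i.e. the two
# functions are inverses of one another.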
#-----------------------------------------------------------------------------
# Data
VEGA = VegaSED()
# AB SED has constant flux density 3631 Jy
AB = SED(wavelength=numpy.logspace(1, 8, int(1e5)), flux=numpy.ones(int(1e5)))
AB.flux = (3e-5 * 3631) / (AB.wavelength**2)
AB.z0flux = AB.flux[:]
# Solar SED from HST CALSPEC (http://www.stsci.edu/hst/observatory/cdbs/calspec.html)
SOL = SED()
SOL.loadFromFile(astLib.__path__[0] + os.path.sep + "data" + os.path.sep +
"sun_reference_stis_001.ascii")
| lgpl-2.1 |
wkew/FTMSVisualization | 3-HeteroClassPlotter.py | 1 | 10441 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 11:42:36 2016
@author: Will Kew
will.kew@gmail.com
Copyright Will Kew, 2016
This file is part of FTMS Visualisation (also known as i-van Krevelen).
FTMS Visualisation is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FTMS Visualisation is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FTMS Visualisation. If not, see <http://www.gnu.org/licenses/>.
This script will read in an assigned peaklist (example input file included) and calculate the heteroatomic class distribution.
The output is a vertical bar plot of heteroatomic class versus count. You can also have the calculated numbers output in a format for replotting.
This tool uses Seaborn - http://seaborn.pydata.org/
A number of (partially tested) other functions to plot output are included, though commented out.
This tool was used in our recent paper on Scotch Whisky - https://link.springer.com/article/10.1007/s13361-016-1513-y
The prompt asking the user whether these are whisky samples stems from that work - it also serves as an example of how to customise which classes to include.
"""
from __future__ import print_function # Python 2 compatibility
from __future__ import absolute_import # Python 2 compatibility
import os, sys
import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
"""
# We import also the FTMSVizProcessingModule which contains a few useful functions.
# here we define where the scripts are stored.
# Make sure to change this to where you have saved these scripts.
"""
try: #test if running in ipython
__IPYTHON__
except NameError: #if not running in ipython....
import FTMSVizProcessingModule as FTPM
    path = os.path.join(os.getcwd(), "data") + os.sep #example data location
else: #if running in ipython
scriptlocation = "/LOCAL/FTMSVis/FTMSVisualization-master/"
sys.path.append(scriptlocation)
import FTMSVizProcessingModule as FTPM
path = "/LOCAL/FTMSVis/data/"
whisky = input("Are these Whisky samples - Y or N?" )
if whisky.upper() == "Y":
whisky = True
else:
whisky = False
inputpath = path +"OutputCSV/"
outputpath = path + "Images/Classes/"
FTPM.make_sure_path_exists(outputpath) #this function checks the output directory exists; if it doesnt, it creates it.
print("Looking for CSVs in " + inputpath)
filesA = os.listdir(inputpath)
filesB = []
for y in filesA:
if y[-8:] =="hits.csv" and y[-10:] != "nohits.csv" and y[-11:] !="isohits.csv":
filesB.append(y)
nfiles = len(filesB)
samplenames=[]
for x in filesB:
samplenames.append(x[:-9])
heteroclasses=[]
for z in filesB:
df1 = pd.read_csv(inputpath+z,index_col=0)
hetclas = df1["HeteroClass"]
hetclaslist = hetclas.tolist()
heteroclasses.append(hetclaslist)
heteroclasses = [item for sublist in heteroclasses for item in sublist]
hetclasset = list(set(heteroclasses))
indexlist = []
for i in samplenames:
for n in range(len(hetclasset)):
indexlist.append(i)
###This section is relevant to my whisky samples
if whisky == True:
columnnames = ["Sample","Class","WoodType","Region","Age","Peated","HeteroClass","HeteroClassCount"]
df4 = pd.read_csv(path+"SampleInfo-Dict.csv",index_col=0)
df4 = df4.T
dict4 = df4.to_dict()
outputdata = pd.DataFrame(index = range(len(indexlist)), columns=columnnames)
a = 0
for y in filesB:
df2 = pd.read_csv(inputpath+y,index_col=0)
counter = Counter(df2["HeteroClass"])
for x in counter:
outputdata.iloc[a][0] = y[:-9]
outputdata.iloc[a][1] = dict4[y[:-9]]["Class"]
outputdata.iloc[a][2] = dict4[y[:-9]]["Total Wood"]
outputdata.iloc[a][3] = dict4[y[:-9]]["Region"]
outputdata.iloc[a][4] = dict4[y[:-9]]["Age"]
outputdata.iloc[a][5] = dict4[y[:-9]]["Peated"]
outputdata.iloc[a][6] = x
outputdata.iloc[a][7] = counter[x]
a = a+1
outputdata = outputdata.dropna(how="all",axis=0)
else:
columnnames = ["Sample","Class","HeteroClass","HeteroClassCount"]
outputdata = pd.DataFrame(index = range(len(indexlist)), columns=columnnames)
a = 0
for y in filesB:
df2 = pd.read_csv(inputpath+y,index_col=0)
counter = Counter(df2["HeteroClass"])
for x in counter:
outputdata.iloc[a][0] = y[:-9]
            outputdata.iloc[a][1] = y[:-9] #this is the Class variable, and should be defined as appropriate for what you're plotting. In the case of single samples, it can be the sample name.
outputdata.iloc[a][2] = x
outputdata.iloc[a][3] = counter[x]
a = a+1
outputdata = outputdata.dropna(how="all",axis=0)
pd.to_numeric(outputdata["HeteroClassCount"],errors="raise")
saveoutputdata = input("Do you want to save the output data in a text file for later re-processing - Y or N? ")
if saveoutputdata.upper() == "Y":
outputdata.to_excel(inputpath+"HetClassByClass-longform.xlsx") #this saves the info out in a longform for plotting.
#outputdata = pd.read_excel(inputpath+"HetClassByClass-longform.xlsx") #this reads that data back in. Only necessary for manually re-running bits of script.
# This section creates a unique, naturally sorted list of heteroatom classes for plotting. Only really works for CHO formula.
# If you have exotic heteroatoms, will need to refigure this yourself, or just hardcode the order you want. easy to do in Excel.
order = outputdata["HeteroClass"].tolist()
order= list(set(order))
order.sort(key=FTPM.natural_sort_key) # this natural sort function ensures a logical order to your barplot.
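# e.g. natural sorting gives ["O2", "O3", ..., "O9", "O10", "O11"] rather than the
# lexicographic ["O10", "O11", "O2", ...], so the bars appear in a sensible order.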
if whisky == True:
CHOorder = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19"]
Fullorder = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18",
"O19","O1S1","O2S1","O3S1","O4S1","O5S1","O6S1","O7S1","O8S1","O9S1","O10S1","O11S1","O12S1"]
CHOSorder =["O1S1","O2S1","O3S1","O4S1","O5S1","O6S1","O7S1","O8S1","O9S1","O10S1","O11S1","O12S1"]
CHOSorderNew = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19","OnS"]
labels = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19",r'O$\mathregular {_n}$S']
else:
df = outputdata
#colours = ["#a6cee3","#1f78b4","#b2df8a"] #colorblind and print friendly colours picked from http://colorbrewer2.org/
colours = ["#1b9e77","#d95f02","#7570b3"] #as above, but brighter
def barplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.barplot(x="HeteroClass",y="HeteroClassCount",hue="Class",
data=outputdata,order=order,palette=sns.color_palette(colours))
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
handles, labels = ax.get_legend_handles_labels()
if len(labels) == 1:
ax.legend_.remove()
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"Barplot.png",dpi=600,bbox_inches="tight")
fig.savefig(outputpath+"Barplot.eps",dpi=600,bbox_inches="tight")
barplot() #plots a barplot.
"""
# Here are some further examples of the Seaborn Plotting library applied to this problem.
# Most of these rely on having many samples across a small number of classes you wish to compare
def violinplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.violinplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,
order=order,
palette=sns.color_palette("bright"),
split=False,bw="silverman",scale_hue=True,scale="width",
cut=2,linewidth=1.5,inner="quartiles",saturation=1)
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
sns.despine()
fig = ax.get_figure()
locs, labels = plt.xticks()
plt.xticks(locs, labels, rotation=90)
cur_ylim = ax.get_ylim()
ax.set_ylim(0,cur_ylim[1])
fig.set_size_inches((POPM.mm2inch(171,80)), forward=True)
fig.savefig(outputpath+"violinplot-scalewidth.png",dpi=600,bbox_inches="tight")
fig.savefig(outputpath+"violinplot-scalewidth.eps",dpi=600,bbox_inches="tight")
def boxplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.boxplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"))
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"Boxplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
def swarmplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.swarmplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"))
ax.set(xlabel='Heteroatomic Class', ylabel='Average Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"swarmplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
def stripplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.stripplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"),jitter=False,split=True)
ax.set(xlabel='Heteroatomic Class', ylabel='Average Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"striplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
"""
#EOF | gpl-3.0 |
Fireblend/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
henkhaus/wow | testing/plotter.py | 1 | 1278 | from pymongo import MongoClient
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, time, timedelta
client = MongoClient()
# using wowtest.auctiondata
db = client.wowtest
posts = db.auctiondata
auctions = posts.find().limit(10)
#time.time() into datetime --->
#datetime.datetime.fromtimestamp('xxxx').strftime('%c')
def dt_to_timestamp(dt):
    #timestamp = (dt - datetime(1970, 1, 1)).total_seconds()
return (int(dt.strftime('%s')))
def getdata(num, quantum):
valid = []
today = datetime.combine(date.today(), time())
for i in range(num+1):
day = today - i*quantum
gte = dt_to_timestamp(day)
lt = dt_to_timestamp(day+quantum)
time_query = {'$gte':gte, '$lt':lt}
valid.insert(0, posts.find({'viewtime':time_query}).count())
return valid
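# e.g. getdata(48, timedelta(hours=1)) returns 49 hourly counts (oldest first),
# each obtained with a range query of the form {'viewtime': {'$gte': t0, '$lt': t1}}.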
def format_date(x, n):
today = datetime.combine(date.today(), time())
day = today - timedelta(hours=n-x-1)
return day.strftime('%m%d%H')
def plotbar(data, color):
plt.bar(range(len(data)), data, align='center', color=color)
# run
n = 48
val = getdata(n, timedelta(hours=1))
plotbar(val, '#4788d2')
plt.xticks(range(n), [format_date(i, n) for i in range(n)], size='small', rotation=90)
plt.grid(axis='y')
plt.show()
| apache-2.0 |
mikebenfield/scikit-learn | sklearn/neighbors/regression.py | 26 | 10999 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
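        # weights is None for 'uniform' weighting; otherwise it is an
        # (n_query, n_neighbors) array and the prediction below is the weighted
        # mean of the neighbors' targets: sum_i(w_i * y_i) / sum_i(w_i).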
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
MLWave/auto-sklearn | autosklearn/wrapper_for_SMAC.py | 5 | 3119 | try:
import cPickle as pickle
except:
import pickle
import os
import time
import signal
import sys
import lockfile
from HPOlibConfigSpace import configuration_space
from autosklearn.data.data_manager import DataManager
import autosklearn.models.holdout_evaluator
from autosklearn.models.paramsklearn import get_class
def store_and_or_load_data(outputdir, dataset, data_dir):
save_path = os.path.join(outputdir, dataset + "_Manager.pkl")
if not os.path.exists(save_path):
lock = lockfile.LockFile(save_path)
while not lock.i_am_locking():
try:
lock.acquire(timeout=60) # wait up to 60 seconds
except lockfile.LockTimeout:
lock.break_lock()
lock.acquire()
print "I locked", lock.path
        # It is not yet certain whether the file already exists.
try:
if not os.path.exists(save_path):
D = DataManager(dataset, data_dir, verbose=True)
fh = open(save_path, 'w')
pickle.dump(D, fh, -1)
fh.close()
else:
D = pickle.load(open(save_path, 'r'))
except:
raise
finally:
lock.release()
else:
D = pickle.load(open(save_path, 'r'))
print "Loaded data"
return D
# Signal handlers seem to work only if they are globally defined.
# To give the handler access to the evaluator class, the evaluator name has to
# be a global name. It's not the cleanest solution, but it works for now.
evaluator = None
def signal_handler(signum, frame):
print "Aborting Training!"
global evaluator
evaluator.finish_up()
exit(0)
signal.signal(15, signal_handler)
def main(basename, input_dir, params):
output_dir = os.getcwd()
D = store_and_or_load_data(data_dir=input_dir, dataset=basename,
outputdir=output_dir)
cs = get_class(D.info).get_hyperparameter_search_space()
configuration = configuration_space.Configuration(cs, **params)
global evaluator
evaluator = autosklearn.models.holdout_evaluator.HoldoutEvaluator(
Datamanager=D, configuration=configuration, with_predictions=True,
all_scoring_functions=True, output_dir=output_dir)
evaluator.fit()
evaluator.finish_up()
if __name__ == "__main__":
outer_starttime = time.time()
instance_name = sys.argv[1]
instance_specific_information = sys.argv[2] # = 0
cutoff_time = float(sys.argv[3]) # = inf
cutoff_length = int(float(sys.argv[4])) # = 2147483647
seed = int(float(sys.argv[5]))
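    # SMAC invokes this wrapper with the fixed positional arguments above
    # followed by "-name value" parameter pairs, e.g. (sketch; the path and
    # parameter name are hypothetical):
    #   wrapper_for_SMAC.py /data/sets/digits 0 3600.0 2147483647 42 -classifier random_forest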
params = dict()
for i in range(6, len(sys.argv), 2):
p_name = str(sys.argv[i])
if p_name[0].startswith("-"):
p_name = p_name[1:]
params[p_name] = sys.argv[i + 1].strip()
for key in params:
try:
params[key] = float(params[key])
except:
pass
dataset = os.path.basename(instance_name)
data_dir = os.path.dirname(instance_name)
main(basename=dataset, input_dir=data_dir, params=params)
sys.exit(0)
| bsd-3-clause |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be visited to the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
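# e.g. ValidateInStrings('toolbar', ['None', 'classic', 'toolbar2'], ignorecase=True)
# maps 'TOOLBAR2' -> 'toolbar2' and raises ValueError for unrecognised strings.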
def validate_path_exists(s):
'If s is a path, return s, else False'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
'confirm that this is a Postscript of PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_numerix = ValidateInStrings('numerix',[
'Numeric','numarray','numpy',
], ignorecase=True)
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
if s.lower() == 'none':
return 'None'
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
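# validate_color accepts named colours and single-letter codes (e.g. 'black', 'r'),
# greyscale strings such as '0.75', hex values with or without a leading '#',
# the string 'none', and comma-separated RGB tuples such as '(0.1, 0.2, 0.3)'.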
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'numerix' : ['numpy', validate_numerix],
'maskedarray' : [False, validate_bool],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'units' : [False, validate_bool],
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
'lines.marker' : ['None', str], # black
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialised (no jaggies)
'lines.dash_joinstyle' : ['miter', validate_joinstyle],
'lines.solid_joinstyle' : ['miter', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialised (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12.0, validate_float], #
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
    'font.fantasy'        : [['Comic Sans MS', 'Chicago', 'Charcoal', 'Impact',
                              'Western', 'fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.fontstyle' : ['normal', str],
'text.fontangle' : ['normal', str],
'text.fontvariant' : ['normal', str],
'text.fontweight' : ['normal', str],
'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
'image.origin' : ['upper', str], # lookup table
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x any y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.unicode_minus' : [True, validate_bool],
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.pad' : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.02, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.columnspacing' : [2., validate_float], # the border between the axes and legend edge
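    # Note: the legend.* keys below repeat entries defined above; in a dict
    # literal the later value silently wins (e.g. legend.axespad ends up as 0.5).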
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
'savefig.orientation' : ['portrait', validate_orientation], # edgecolor; white
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
'ps.useafm' : [False, validate_bool], # Set PYTHONINSPECT
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
'plugins.directory' : ['.matplotlib_plugins', str], # where plugin directory is locate
'path.simplify' : [False, validate_bool],
'agg.path.chunksize' : [0, validate_int] # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| agpl-3.0 |
williamdlees/TRIgS | PlotIdentity.py | 2 | 6306 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Using BLAST, create a CSV file that lists the % identity of the specified sequence to all sequences from the specified germline
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
import os.path
import sys
import argparse
import csv
import re
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import pairwise2
from Bio.Alphabet import generic_nucleotide
from Bio import SeqIO
from Bio import Phylo
from itertools import izip
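# Example invocation (sketch; the file names are hypothetical):
#   python PlotIdentity.py repertoire_id.csv -p mabs.csv,ro/m8/lblue -c hot_r -b -s identity.png
# Each "-p" entry pairs a CSV of points with a format string: a matplotlib marker
# spec, an optional /m<size> marker size, and an optional /l<colour> point label.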
def main(argv):
parser = argparse.ArgumentParser(description='Create an Identity/Divergence plot.')
parser.add_argument('repertoire', help='file containing repertoire sequence identities (CSV)')
parser.add_argument('-a', '--adjust', help='Adjust labels to prevent overlap (requires package adjustText)', action='store_true')
parser.add_argument('-b', '--bar', help='Plot a colour bar', action='store_true')
parser.add_argument('-c', '--colourmap', help='colourmap')
    parser.add_argument('-g', '--background', help='Set the contour colour where the density is zero')
parser.add_argument('-mx', '--maxx', help='max divergence value to show')
parser.add_argument('-my', '--miny', help='min identity value to show')
    parser.add_argument('-p', '--points', help='comma-separated list of identity files and formats')
parser.add_argument('-s', '--save', help='Save output to file (as opposed to interactive display)')
args = parser.parse_args()
if args.adjust:
from adjustText import adjust_text
colourmap = args.colourmap if args.colourmap else 'hot_r'
plist = args.points.split(',')
points = []
repertoire = read_file(args.repertoire)
def pairwise(iterable):
a = iter(iterable)
return izip(a, a)
if len(plist) > 0:
try:
for file, format in pairwise(plist):
points.append((read_file(file), format))
except IOError:
print 'file "%s" cannot be opened.' % file
except:
print '"points" must consist of pairs of files and formats.'
quit()
max_divergence = int(args.maxx) if args.maxx else None
min_identity = int(args.miny) if args.miny else None
savefile = args.save if args.save else None
if not max_divergence:
max_divergence = max(repertoire['GermlineDist'])
for point in points:
max_divergence = max(max_divergence, max(point[0]['GermlineDist']))
max_divergence = int(max_divergence) + 1
if not min_identity:
min_identity = min(repertoire['TargetDist'])
for point in points:
min_identity = min(min_identity, min(point[0]['TargetDist']))
min_identity = int(min_identity)
H, yedges, xedges = np.histogram2d(repertoire['TargetDist'], repertoire['GermlineDist'], bins=[101-min_identity, max_divergence+1], range=[[min_identity, 101], [-1, max_divergence]], normed=False)
# For alternative interpolations and plots, see http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram2d.html
# For colour maps, see http://matplotlib.org/examples/color/colormaps_reference.html
fig = plt.figure()
cm = plt.cm.get_cmap(colourmap)
if args.background:
cm.set_under(color=args.background)
ax = fig.add_subplot(1,1,1)
im = plt.imshow(H, interpolation='bilinear', origin='low', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], vmin=0.0000001, cmap=cm)
ax.set_xlim(xedges[0], xedges[-1])
ax.set_ylim(yedges[0], yedges[-1])
if args.bar:
cb = plt.colorbar(im, shrink=0.8, extend='neither')
cb.ax.set_ylabel('sequences', rotation=90)
texts = []
for point in points:
df, format = point
markersize = 5
label = False
labelcolour = 'black'
fmt = format.split('/')
format = fmt[0]
for f in fmt[1:]:
if f[0] == 'm':
markersize = int(f[1:])
elif f[0] == 'l':
label = True
if len(f) > 1:
labelcolour = f[1:]
else:
print 'Unrecognised format string: %s' % format
for index, row in df.iterrows():
if label:
if args.adjust:
texts.append(plt.text(row['GermlineDist'], row['TargetDist'], row['SequenceId'], bbox={'pad':0, 'alpha':0}, fontdict={ 'color': labelcolour}))
else:
texts.append(plt.text(row['GermlineDist'] + 0.2, row['TargetDist'] - 0.2, row['SequenceId'], bbox={'pad':0, 'alpha':0}, fontdict={ 'color': labelcolour}))
ax.plot(row['GermlineDist'], row['TargetDist'], format, markersize=markersize)
if args.adjust:
adjust_text(texts)
plt.xlabel('Germline Divergence (%)')
plt.ylabel('Target Ab Identity (%)')
if savefile:
plt.savefig(savefile)
else:
plt.show()
def read_file(file):
df = pd.read_csv(file, converters={'SequenceId': lambda x: x})
for key in ("SequenceId", "TargetDist", "GermlineDist"):
if key not in df.keys():
print 'File %s does not contain a column "%s"' % (file, key)
quit()
for index, row in df.iterrows():
try:
x = row[1] * row[2] # check they behave like numbers
except:
print 'Error in file %s: malformed row at %s.' % (file, row[0])
if len(df) < 1:
print '%s: empty file.' % file
quit()
return df
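# A minimal identity CSV (sketch; the values are illustrative) looks like:
#   SequenceId,TargetDist,GermlineDist
#   mAb_1,92.5,7.1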
if __name__=="__main__":
main(sys.argv)
| mit |
banesullivan/ParaViewGeophysics | PVGeo/ubc/tensor.py | 1 | 21910 | __all__ = [
'TensorMeshReader',
'TensorMeshAppender',
'TopoMeshAppender',
]
__displayname__ = 'Tensor Mesh'
import os
import sys
import numpy as np
import pandas as pd
import vtk
from .. import _helpers, interface
from ..base import AlgorithmBase
from .two_file_base import ModelAppenderBase, ubcMeshReaderBase
if sys.version_info < (3,):
from StringIO import StringIO
else:
from io import StringIO
class TensorMeshReader(ubcMeshReaderBase):
"""UBC Mesh 2D/3D models are defined using a 2-file format. The "mesh" file
describes how the data is discretized. The "model" file lists the physical
property values for all cells in a mesh. A model file is meaningless without
an associated mesh file. The reader will automatically detect if the mesh is
2D or 3D and read the remainder of the data with that dimensionality
    assumption. If the mesh file is 2D, then the model file must also be in the
2D format (same for 3D).
Note:
Model File is optional. Reader will still construct
``vtkRectilinearGrid`` safely.
"""
__displayname__ = 'UBC Tensor Mesh Reader'
__category__ = 'reader'
description = 'PVGeo: UBC Mesh 2D/3D Two-File Format'
def __init__(self, nOutputPorts=1, outputType='vtkRectilinearGrid', **kwargs):
ubcMeshReaderBase.__init__(
self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
)
self.__mesh = vtk.vtkRectilinearGrid()
self.__models = []
@staticmethod
def place_model_on_mesh(mesh, model, data_name='Data'):
"""Places model data onto a mesh. This is for the UBC Grid data reaers
to associate model data with the mesh grid.
Args:
mesh (vtkRectilinearGrid): The ``vtkRectilinearGrid`` that is the
mesh to place the model data upon.
model (np.array): A NumPy float array that holds all of the data to
place inside of the mesh's cells.
data_name (str) : The name of the model data array once placed on the
``vtkRectilinearGrid``.
Return:
vtkRectilinearGrid :
Returns the input ``vtkRectilinearGrid`` with model data appended.
"""
if isinstance(model, dict):
for key in model.keys():
TensorMeshReader.place_model_on_mesh(mesh, model[key], data_name=key)
return mesh
# model.GetNumberOfValues() if model is vtkDataArray
# Make sure this model file fits the dimensions of the mesh
ext = mesh.GetExtent()
n1, n2, n3 = ext[1], ext[3], ext[5]
if n1 * n2 * n3 < len(model):
raise _helpers.PVGeoError(
'Model `%s` has more data than the given mesh has cells to hold.'
% data_name
)
elif n1 * n2 * n3 > len(model):
raise _helpers.PVGeoError(
'Model `%s` does not have enough data to fill the given mesh\'s cells.'
% data_name
)
# Swap axes because VTK structures the coordinates a bit differently
# - This is absolutely crucial!
# - Do not play with unless you know what you are doing!
if model.ndim > 1 and model.ndim < 3:
ncomp = model.shape[1]
model = np.reshape(model, (n1, n2, n3, ncomp))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :, :] # Note it is in Fortran ordering
model = np.reshape(model, (n1 * n2 * n3, ncomp))
else:
model = np.reshape(model, (n1, n2, n3))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :] # Note it is in Fortran ordering
model = model.flatten()
# Convert data to VTK data structure and append to output
c = interface.convert_array(model, name=data_name, deep=True)
# THIS IS CELL DATA! Add the model data to CELL data:
mesh.GetCellData().AddArray(c)
return mesh
# ------------------------------------------------------------------#
# ---------------------- UBC MESH 2D ------------------------#
# ------------------------------------------------------------------#
@staticmethod
def ubc_mesh_2d(FileName, output):
"""This method reads a UBC 2D Mesh file and builds an empty
``vtkRectilinearGrid`` for data to be inserted into. `Format Specs`_.
.. _Format Specs: http://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/mesh2Dfile.html
Args:
FileName (str) : The mesh filename as an absolute path for the input
mesh file in UBC 3D Mesh Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid.
Mesh is defined by the input mesh file.
No data attributes here, simply an empty mesh. Use the
``place_model_on_mesh()`` method to associate with model data.
"""
# Read in data from file
xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubc_mesh_2d_part(FileName)
nx = np.sum(np.array(xdisc, dtype=int)) + 1
nz = np.sum(np.array(zdisc, dtype=int)) + 1
# Now generate the vtkRectilinear Grid
def _genCoords(pts, disc, z=False):
c = [float(pts[0])]
for i in range(len(pts) - 1):
start = float(pts[i])
stop = float(pts[i + 1])
num = int(disc[i])
w = (stop - start) / num
for j in range(1, num):
c.append(start + (j) * w)
c.append(stop)
c = np.array(c, dtype=float)
if z:
c = -c[::-1]
return interface.convert_array(c, deep=True)
xcoords = _genCoords(xpts, xdisc)
zcoords = _genCoords(zpts, zdisc, z=True)
ycoords = interface.convert_array(np.zeros(1), deep=True)
output.SetDimensions(nx, 2, nz) # note this subtracts 1
output.SetXCoordinates(xcoords)
output.SetYCoordinates(ycoords)
output.SetZCoordinates(zcoords)
return output
@staticmethod
def ubc_model_2d(FileName):
"""Reads a 2D model file and returns a 1D NumPy float array. Use the
``place_model_on_mesh()`` method to associate with a grid.
Note:
Only supports single component data
Args:
FileName (str) : The model filename as an absolute path for the
input model file in UBCMesh Model Format. Also accepts a list of
string file names.
Return:
np.array :
a NumPy float array that holds the model data read from
the file. Use the ``place_model_on_mesh()`` method to associate
with a grid. If a list of file names is given then it will
return a dictionary of NumPy float array with keys as the
basenames of the files.
"""
if isinstance(FileName, (list, tuple)):
out = {}
for f in FileName:
out[os.path.basename(f)] = TensorMeshReader.ubc_model_2d(f)
return out
dim = np.genfromtxt(
FileName, dtype=int, delimiter=None, comments='!', max_rows=1
)
names = ['col%d' % i for i in range(dim[0])]
df = pd.read_csv(
FileName, names=names, delim_whitespace=True, skiprows=1, comment='!'
)
data = df.values
if np.shape(data)[0] != dim[1] and np.shape(data)[1] != dim[0]:
raise _helpers.PVGeoError('Model file `%s` improperly formatted.' % FileName)
return data.flatten(order='F')
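# Illustrative 2D model file layout (inferred from the parsing above, not from
# the UBC-GIF spec itself; '!' starts a comment):
#
#     3 2            ! values per line, number of lines
#     1.0 1.0 1.0    ! first line of cell values
#     2.0 2.0 2.0    ! second line of cell values
#
# ubc_model_2d() would return the six values flattened in Fortran order:
# [1.0, 2.0, 1.0, 2.0, 1.0, 2.0].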
def __ubc_mesh_data_2d(self, filename_mesh, filename_models, output):
"""Helper method to read a 2D mesh"""
# Construct/read the mesh
if self.need_to_readMesh():
TensorMeshReader.ubc_mesh_2d(filename_mesh, self.__mesh)
self.need_to_readMesh(flag=False)
output.DeepCopy(self.__mesh)
if self.need_to_readModels() and self.this_has_models():
self.__models = []
for f in filename_models:
# Read the model data
self.__models.append(TensorMeshReader.ubc_model_2d(f))
self.need_to_readModels(flag=False)
return output
# ------------------------------------------------------------------#
# ---------------------- UBC MESH 3D ------------------------#
# ------------------------------------------------------------------#
@staticmethod
def ubc_mesh_3d(FileName, output):
"""This method reads a UBC 3D Mesh file and builds an empty
``vtkRectilinearGrid`` for data to be inserted into.
Args:
FileName (str) : The mesh filename as an absolute path for the input
mesh file in UBC 3D Mesh Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 3D Mesh grid.
Mesh is defined by the input mesh file.
No data attributes here, simply an empty mesh. Use the
``place_model_on_mesh()`` method to associate with model data.
"""
# --- Read in the mesh ---#
fileLines = np.genfromtxt(FileName, dtype=str, delimiter='\n', comments='!')
# Get mesh dimensions
dim = np.array(fileLines[0].split('!')[0].split(), dtype=int)
dim = (dim[0] + 1, dim[1] + 1, dim[2] + 1)
# The origin corner (Southwest-top)
# - Remember UBC format specifies down as the positive Z
# - Easting, Northing, Altitude
oo = np.array(fileLines[1].split('!')[0].split(), dtype=float)
ox, oy, oz = oo[0], oo[1], oo[2]
# Read cell sizes for each line in the UBC mesh files
def _readCellLine(line):
line_list = []
for seg in line.split():
if '*' in seg:
sp = seg.split('*')
seg_arr = np.ones((int(sp[0]),), dtype=float) * float(sp[1])
else:
seg_arr = np.array([float(seg)], dtype=float)
line_list.append(seg_arr)
return np.concatenate(line_list)
# Read the cell sizes
cx = _readCellLine(fileLines[2].split('!')[0])
cy = _readCellLine(fileLines[3].split('!')[0])
cz = _readCellLine(fileLines[4].split('!')[0])
# Invert the indexing of the vector to start from the bottom.
cz = cz[::-1]
# Adjust the reference point to the bottom south west corner
oz = oz - np.sum(cz)
# Now generate the coordinates for from cell width and origin
cox = ox + np.cumsum(cx)
cox = np.insert(cox, 0, ox)
coy = oy + np.cumsum(cy)
coy = np.insert(coy, 0, oy)
coz = oz + np.cumsum(cz)
coz = np.insert(coz, 0, oz)
# Set the dims and coordinates for the output
output.SetDimensions(dim[0], dim[1], dim[2])
# Convert to VTK array for setting coordinates
output.SetXCoordinates(interface.convert_array(cox, deep=True))
output.SetYCoordinates(interface.convert_array(coy, deep=True))
output.SetZCoordinates(interface.convert_array(coz, deep=True))
return output
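# Illustrative 3D mesh file (values are made up; '!' starts a comment):
#
#     2 3 4              ! number of cells in the easting, northing, vertical directions
#     0.0 0.0 100.0      ! origin: easting, northing, top elevation
#     2*50.0             ! cell widths along X (2 cells of 50.0)
#     3*25.0             ! cell widths along Y
#     10.0 3*5.0         ! cell widths along Z (positive down in UBC convention)
#
# _readCellLine('10.0 3*5.0') expands the '*' shorthand to [10.0, 5.0, 5.0, 5.0];
# the Z sizes are then reversed and the top-elevation origin is shifted down by
# their sum so the grid is built from the bottom up, as done above.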
def __ubc_mesh_data_3d(self, filename_mesh, filename_models, output):
"""Helper method to read a 3D mesh"""
# Construct/read the mesh
if self.need_to_readMesh():
TensorMeshReader.ubc_mesh_3d(filename_mesh, self.__mesh)
self.need_to_readMesh(flag=False)
output.DeepCopy(self.__mesh)
if self.need_to_readModels() and self.this_has_models():
self.__models = []
for f in filename_models:
# Read the model data
self.__models.append(TensorMeshReader.ubc_model_3d(f))
self.need_to_readModels(flag=False)
return output
def __ubc_tensor_mesh(self, filename_mesh, filename_models, output):
"""Wrapper to Read UBC GIF 2D and 3D meshes. UBC Mesh 2D/3D models are
defined using a 2-file format. The "mesh" file describes how the data is
discretized. The "model" file lists the physical property values for all
cells in a mesh. A model file is meaningless without an associated mesh
file. If the mesh file is 2D, then the model file must also be in the
2D format (same for 3D).
Args:
filename_mesh (str) : The mesh filename as an absolute path for the
input mesh file in UBC 2D/3D Mesh Format
filename_models (str or list(str)) : The model filename(s) as an
absolute path for the input model file in UBC 2D/3D Model Format.
output (vtkRectilinearGrid) : The output data object
Return:
vtkRectilinearGrid :
a ``vtkRectilinearGrid`` generated from the UBC 2D/3D Mesh grid.
Mesh is defined by the input mesh file.
Cell data is defined by the input model file.
"""
# Check if the mesh is a UBC 2D mesh
if self.is_2d():
self.__ubc_mesh_data_2d(filename_mesh, filename_models, output)
# Check if the mesh is a UBC 3D mesh
elif self.is_3d():
self.__ubc_mesh_data_3d(filename_mesh, filename_models, output)
else:
raise _helpers.PVGeoError('File format not recognized')
return output
def RequestData(self, request, inInfo, outInfo):
"""Handles data request by the pipeline."""
# Get output:
output = self.GetOutputData(outInfo, 0)
# Get requested time index
i = _helpers.get_requested_time(self, outInfo)
self.__ubc_tensor_mesh(
self.get_mesh_filename(), self.get_model_filenames(), output
)
# Place the model data for given timestep onto the mesh
if len(self.__models) > i:
TensorMeshReader.place_model_on_mesh(
output, self.__models[i], self.get_data_name()
)
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Handles info request by pipeline about timesteps and grid extents."""
# Call parent to handle time stuff
ubcMeshReaderBase.RequestInformation(self, request, inInfo, outInfo)
# Now set whole output extent
if self.need_to_readMesh():
ext = self._read_extent()
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
return 1
def clear_mesh(self):
"""Use to clean/rebuild the mesh"""
self.__mesh = vtk.vtkRectilinearGrid()
ubcMeshReaderBase.clear_models(self)
def clear_models(self):
"""Use to clean the models and reread"""
self.__models = []
ubcMeshReaderBase.clear_models(self)
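# A rough usage sketch of the static helpers above (file names are hypothetical
# and this block is illustrative, not part of the original module):
#
# import vtk
# grid = vtk.vtkRectilinearGrid()
# TensorMeshReader.ubc_mesh_2d('mesh2d.msh', grid)            # build the empty grid
# model = TensorMeshReader.ubc_model_2d('conductivity.mod')   # matching model file
# TensorMeshReader.place_model_on_mesh(grid, model, data_name='Conductivity')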
###############################################################################
class TensorMeshAppender(ModelAppenderBase):
"""This filter reads a timeseries of models and appends it to an input
``vtkRectilinearGrid``
"""
__displayname__ = 'UBC Tensor Mesh Appender'
__category__ = 'filter'
def __init__(self, **kwargs):
ModelAppenderBase.__init__(
self,
inputType='vtkRectilinearGrid',
outputType='vtkRectilinearGrid',
**kwargs
)
def _read_up_front(self):
"""Internal helepr to read data at start"""
reader = ubcMeshReaderBase.ubc_model_3d
if not self._is_3D:
# Note how in UBC format, 2D grids are specified on an XZ plane (no Y component)
# This will only work prior to rotations to account for real spatial reference
reader = TensorMeshReader.ubc_model_2d
self._models = []
for f in self._model_filenames:
# Read the model data
self._models.append(reader(f))
self.need_to_read(flag=False)
return
def _place_on_mesh(self, output, idx=0):
"""Internal helepr to place a model on the mesh for a given index"""
TensorMeshReader.place_model_on_mesh(
output, self._models[idx], self.get_data_name()
)
return
###############################################################################
class TopoMeshAppender(AlgorithmBase):
"""This filter reads a single discrete topography file and appends it as a
boolean data array.
"""
__displayname__ = 'Append UBC Discrete Topography'
__category__ = 'filter'
def __init__(
self, inputType='vtkRectilinearGrid', outputType='vtkRectilinearGrid', **kwargs
):
AlgorithmBase.__init__(
self,
nInputPorts=1,
inputType=inputType,
nOutputPorts=1,
outputType=outputType,
)
self._topoFileName = kwargs.get('filename', None)
self.__indices = None
self.__need_to_read = True
self.__ne, self.__nn = None, None
def need_to_read(self, flag=None):
"""Ask self if the reader needs to read the files again
Args:
flag (bool): if the flag is set then this method will set the read
status
Return:
bool:
The status of the reader aspect of the filter.
"""
if flag is not None and isinstance(flag, (bool, int)):
self.__need_to_read = flag
return self.__need_to_read
def Modified(self, read_again=True):
"""Call modified if the files needs to be read again again."""
if read_again:
self.__need_to_read = read_again
AlgorithmBase.Modified(self)
def modified(self, read_again=True):
"""Call modified if the files needs to be read again again."""
return self.Modified(read_again=read_again)
def _read_up_front(self):
"""Internal helepr to read data at start"""
# Read the file
content = np.genfromtxt(
self._topoFileName, dtype=str, delimiter='\n', comments='!'
)
dim = content[0].split()
self.__ne, self.__nn = int(dim[0]), int(dim[1])
self.__indices = pd.read_csv(
StringIO("\n".join(content[1::])),
names=['i', 'j', 'k'],
delim_whitespace=True,
)
# NOTE: K indices are inverted
self.need_to_read(flag=False)
return
def _place_on_mesh(self, output):
"""Internal helepr to place an active cells model on the mesh"""
# Check mesh extents to match topography
nx, ny, nz = output.GetDimensions()
nx, ny, nz = nx - 1, ny - 1, nz - 1 # because GetDimensions counts the nodes
topz = np.max(self.__indices['k']) + 1
if nx != self.__nn or ny != self.__ne or topz > nz:
raise _helpers.PVGeoError(
'Dimension mismatch between input grid and topo file.'
)
# # Adjust the k indices to be in cartesian system
# self.__indices['k'] = nz - self.__indices['k']
# Fill out the topo and add it as model as it will be in UBC format
# Create a 3D array of 1s and zeros (1 means beneath topo or active)
topo = np.empty((ny, nx, nz), dtype=float)
topo[:] = np.nan
for row in self.__indices.values:
i, j, k = row
topo[i, j, k + 1 :] = 0
topo[i, j, : k + 1] = 1
# Add as model... ``place_model_on_mesh`` handles the rest
TensorMeshReader.place_model_on_mesh(
output, topo.flatten(), 'Active Topography'
)
return
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
output = self.GetOutputData(outInfo, 0)
output.DeepCopy(pdi) # ShallowCopy if you want changes to propagate upstream
# Perform task:
if self.__need_to_read:
self._read_up_front()
# Place the model data for given timestep onto the mesh
self._place_on_mesh(output)
return 1
#### Setters and Getters ####
def clear_topo_file(self):
"""Use to clear data file name."""
self._topoFileName = None
self.Modified(read_again=True)
def set_topo_filename(self, filename):
"""Use to set the file names for the reader. Handles single strings only"""
if filename is None:
return # do nothing if None is passed by a constructor on accident
elif isinstance(filename, str) and self._topoFileName != filename:
self._topoFileName = filename
self.Modified()
return 1
###############################################################################
#
# import numpy as np
# indices = np.array([[0,0,1],
# [0,1,1],
# [0,2,1],
# [1,0,1],
# [1,1,1],
# [1,2,1],
# [2,0,1],
# [2,1,1],
# [2,2,1],
# ])
#
# topo = np.empty((3,3,3), dtype=float)
# topo[:] = np.nan
#
# for row in indices:
# i, j, k = row
# topo[i, j, k:] = 0
# topo[i, j, :k] = 1
# topo
| bsd-3-clause |
jseabold/scikit-learn | sklearn/feature_selection/rfe.py | 4 | 15662 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
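# Worked example of the elimination loop above (explanatory note, not part of
# the original source): with n_features=10, n_features_to_select=None and
# step=0.25, five features are kept (10 // 2) and int(max(1, 0.25 * 10)) = 2
# features are removed per iteration; `threshold` shrinks the final step so the
# feature count goes 10 -> 8 -> 6 -> 5 and stops at exactly five.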
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features,
not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv.split(X, y)):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores / cv.get_n_splits(X, y)
return self
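# Explanatory note (not part of the original source): the inner RFE runs down to
# a single feature, so each fold contributes ceil((n_features - 1) / step) + 1
# scores, as stated in the class Notes. For example, n_features=10 with step=3
# gives ceil(9 / 3) + 1 = 4 entries in `grid_scores_`, each one the average of
# the per-fold scores (the division by cv.get_n_splits(X, y) above).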
| bsd-3-clause |
cuemacro/chartpy | chartpy_examples/subplot_example.py | 1 | 2359 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas
# support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from chartpy import Chart, Style
# get your own free Quandl API key from https://www.quandl.com/
try:
from chartpy.chartcred import ChartCred
cred = ChartCred()
quandl_api_key = cred.quandl_api_key
except:
quandl_api_key = "x"
# choose run_example = 0 for everything
# run_example = 1 - plot US GDP QoQ (real) and nominal with Plotly/Bokeh/Matplotlib with subplots for each line
# run_example = 2 - plot US GDP QoQ (real + nominal) in two double plots (passing an array of dataframes)
run_example = 0
if run_example == 1 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA", "FRED/A191RP1Q027SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ", "Nominal QoQ"]
# set the style of the plot
style = Style(title="US GDP", source="Quandl/Fred", subplots=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style)
chart.plot(engine='matplotlib')
chart.plot(engine='bokeh')
chart.plot(engine='plotly')
if run_example == 2 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA", "FRED/A191RP1Q027SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ", "Nominal QoQ"]
df = [df, df]
# set the style of the plot
style = Style(title="US GDP double plot", source="Quandl/Fred", subplots=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style)
chart.plot(engine='bokeh')
chart.plot(engine='matplotlib')
chart.plot(engine='plotly') # TODO fix legends though
| apache-2.0 |
nvoron23/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 33 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This select only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1) #'dept')
#evaluate the relationship between the various parameters and the Wealth column
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/skimage/viewer/utils/core.py | 19 | 6555 | import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
from ..._shared.utils import warn
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
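# A small usage sketch (illustrative, not part of the original module): an
# overlay colormap that fades from fully transparent to half-opaque red could
# be built and drawn over a grayscale image roughly like this:
#
# import matplotlib.pyplot as plt
# red_overlay = ClearColormap((1.0, 0.0, 0.0), max_alpha=0.5)
# plt.imshow(grayscale_image, cmap='gray')
# plt.imshow(mask.astype(float), cmap=red_overlay)  # mask values in [0, 1]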
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
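# A brief usage sketch (illustrative, not part of the original module):
#
# fig, ax = figimage(image)           # figure sized to exactly fit `image`
# # ... later, swap in a new frame without rebuilding the figure:
# update_axes_image(ax.images[0], new_image)
# ax.figure.canvas.draw()
#
# `ax.images[0]` is the AxesImage created by the imshow call inside figimage().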
| mit |
ilyes14/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
gclenaghan/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 18 | 3768 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying Non-negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, #max_features=n_features,
stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf features,"
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1, alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("Fitting LDA models with tf features, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/tests/test_size_filter.py | 1 | 25474 | import unittest
from nose.tools import assert_equal, assert_list_equal, nottest, raises
from py_stringmatching.tokenizer.delimiter_tokenizer import DelimiterTokenizer
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
import numpy as np
import pandas as pd
from py_stringsimjoin.filter.size_filter import SizeFilter
from py_stringsimjoin.utils.converter import dataframe_column_to_str
from py_stringsimjoin.utils.generic_helper import remove_redundant_attrs
# test SizeFilter.filter_pair method
class FilterPairTestCases(unittest.TestCase):
def setUp(self):
self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.qg2 = QgramTokenizer(2)
# tests for JACCARD measure
def test_jac_dlm_08_prune(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy',
self.dlm, 'JACCARD', 0.8, False, False, True)
def test_jac_dlm_08_pass(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy aa tt',
self.dlm, 'JACCARD', 0.8, False, False, False)
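# Explanatory note (not from the original source): these two cases follow from
# the standard size-filtering bound for JACCARD at threshold t, which requires
# t * |l_tokens| <= |r_tokens| <= |l_tokens| / t for a pair to possibly reach t.
# With t = 0.8 and five left tokens, the right side needs between
# ceil(0.8 * 5) = 4 and floor(5 / 0.8) = 6 tokens, so 'xx yy' (2 tokens) is
# pruned while 'xx yy aa tt' (4 tokens) survives the filter.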
# tests for COSINE measure
def test_cos_dlm_08_prune(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy',
self.dlm, 'COSINE', 0.8, False, False, True)
def test_cos_dlm_08_pass(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy aa tt',
self.dlm, 'COSINE', 0.8, False, False, False)
# tests for DICE measure
def test_dice_dlm_08_prune_lower(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy uu',
self.dlm, 'DICE', 0.8, False, False, True)
def test_dice_dlm_08_prune_upper(self):
self.test_filter_pair('aa bb cc dd ee', 'cc xx yy aa tt uu ii oo',
self.dlm, 'DICE', 0.8, False, False, True)
def test_dice_dlm_08_pass(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy aa tt',
self.dlm, 'DICE', 0.8, False, False, False)
# tests for OVERLAP measure
def test_overlap_dlm_prune(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy',
self.dlm, 'OVERLAP', 3, False, False, True)
def test_overlap_dlm_pass(self):
self.test_filter_pair('aa bb cc dd ee', 'xx yy aa',
self.dlm, 'OVERLAP', 3, False, False, False)
def test_overlap_dlm_empty(self):
self.test_filter_pair('', '',
self.dlm, 'OVERLAP', 1, False, False, True)
def test_overlap_dlm_empty_with_allow_empty(self):
self.test_filter_pair('', '',
self.dlm, 'OVERLAP', 1, True, False, True)
# tests for EDIT_DISTANCE measure
def test_edit_dist_qg2_prune(self):
self.test_filter_pair('abcd', 'cd',
self.qg2, 'EDIT_DISTANCE', 1, False, False, True)
def test_edit_dist_qg2_pass(self):
self.test_filter_pair('abcd', 'cd',
self.qg2, 'EDIT_DISTANCE', 2, False, False, False)
def test_edit_dist_qg2_empty(self):
self.test_filter_pair('', '',
self.qg2, 'EDIT_DISTANCE', 1, False, False, False)
def test_edit_dist_qg2_empty_with_allow_empty(self):
self.test_filter_pair('', '',
self.qg2, 'EDIT_DISTANCE', 1, True, False, False)
def test_edit_dist_qg2_no_padding_empty(self):
self.test_filter_pair('', '', QgramTokenizer(2, padding=False),
'EDIT_DISTANCE', 1, False, False, False)
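# Explanatory note (not from the original source): for EDIT_DISTANCE the size
# filter compares q-gram counts, and a single edit changes the number of padded
# 2-grams by at most one. 'abcd' produces 5 padded 2-grams and 'cd' produces 3,
# a difference of 2, so the pair is pruned at threshold 1 but survives at
# threshold 2, matching test_edit_dist_qg2_prune and test_edit_dist_qg2_pass
# above.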
# test allow_missing flag
def test_size_filter_pass_missing_left(self):
self.test_filter_pair(None, 'fg ty',
self.dlm, 'DICE', 0.8, False, True, False)
def test_size_filter_pass_missing_right(self):
self.test_filter_pair('fg ty', np.NaN,
self.dlm, 'DICE', 0.8, False, True, False)
def test_size_filter_pass_missing_both(self):
self.test_filter_pair(None, np.NaN,
self.dlm, 'DICE', 0.8, False, True, False)
# tests for empty string input
def test_empty_lstring(self):
self.test_filter_pair('ab', '', self.dlm, 'JACCARD', 0.8,
False, False, True)
def test_empty_rstring(self):
self.test_filter_pair('', 'ab', self.dlm, 'JACCARD', 0.8,
False, False, True)
def test_empty_strings(self):
self.test_filter_pair('', '', self.dlm, 'JACCARD', 0.8,
False, False, True)
def test_empty_strings_with_allow_empty(self):
self.test_filter_pair('', '', self.dlm, 'JACCARD', 0.8,
True, False, False)
@nottest
def test_filter_pair(self, lstring, rstring, tokenizer, sim_measure_type,
threshold, allow_empty, allow_missing, expected_output):
size_filter = SizeFilter(tokenizer, sim_measure_type, threshold,
allow_empty, allow_missing)
actual_output = size_filter.filter_pair(lstring, rstring)
assert_equal(actual_output, expected_output)
# test SizeFilter.filter_tables method
class FilterTablesTestCases(unittest.TestCase):
def setUp(self):
self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.A = pd.DataFrame([{'id': 1, 'attr':'ab cd ef aa bb'},
{'id': 2, 'attr':''},
{'id': 3, 'attr':'ab'},
{'id': 4, 'attr':'ll oo pp'},
{'id': 5, 'attr':'xy xx zz fg'},
{'id': 6, 'attr':None},
{'id': 7, 'attr':''}])
self.B = pd.DataFrame([{'id': 1, 'attr':'mn'},
{'id': 2, 'attr':'he ll'},
{'id': 3, 'attr':'xy pl ou'},
{'id': 4, 'attr':'aa'},
{'id': 5, 'attr':'fg cd aa ef'},
{'id': 6, 'attr':np.NaN},
{'id': 7, 'attr':' '}])
self.empty_table = pd.DataFrame(columns=['id', 'attr'])
self.default_l_out_prefix = 'l_'
self.default_r_out_prefix = 'r_'
# tests for JACCARD measure
def test_jac_dlm_08(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_jac_dlm_08_with_out_attrs(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['attr'], ['attr']),
expected_pairs)
def test_jac_dlm_08_with_out_prefix(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['attr'], ['attr'],
'ltable.', 'rtable.'),
expected_pairs)
# tests for COSINE measure
def test_cos_dlm_08(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,2', '4,3',
'4,5', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 'COSINE', 0.8, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
# tests for DICE measure
def test_dice_dlm_08(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,2', '4,3',
'4,5', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 'DICE', 0.8, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
# tests for OVERLAP measure
def test_overlap_dlm_3(self):
expected_pairs = set(['1,3', '1,5', '4,3', '4,5', '5,3', '5,5'])
self.test_filter_tables(self.dlm, 'OVERLAP', 3, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
# tests for EDIT_DISTANCE measure
def test_edit_distance_qg2_2(self):
A = pd.DataFrame([{'l_id': 1, 'l_attr':'1990'},
{'l_id': 2, 'l_attr':'200'},
{'l_id': 3, 'l_attr':'0'},
{'l_id': 4, 'l_attr':''},
{'l_id': 5, 'l_attr':np.NaN}])
B = pd.DataFrame([{'r_id': 1, 'r_attr':'200155'},
{'r_id': 2, 'r_attr':'19'},
{'r_id': 3, 'r_attr':'188'},
{'r_id': 4, 'r_attr':''},
{'r_id': 5, 'r_attr':np.NaN}])
qg2_tok = QgramTokenizer(2)
expected_pairs = set(['1,1', '1,2', '1,3',
'2,2', '2,3', '2,3', '3,2', '3,3', '3,4',
'4,2', '4,4'])
self.test_filter_tables(qg2_tok, 'EDIT_DISTANCE', 2, False, False,
(A, B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
# test allow_missing flag
def test_jac_dlm_08_allow_missing(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5',
'6,1', '6,2', '6,3', '6,4', '6,5',
'6,6', '6,7', '1,6', '2,6', '3,6',
'4,6', '5,6', '7,6'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, True,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
# test allow_empty flag
def test_jac_dlm_08_allow_empty(self):
expected_pairs = set(['1,5', '2,7', '3,1', '3,4', '4,3', '5,5', '7,7'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, True, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
# test allow_empty flag with output attributes
def test_jac_dlm_08_allow_empty_with_out_attrs(self):
expected_pairs = set(['1,5', '2,7', '3,1', '3,4', '4,3', '5,5', '7,7'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, True, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['attr'], ['attr']),
expected_pairs)
# test with n_jobs above 1
def test_jac_dlm_08_with_njobs_above_1(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5'])
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.A, self.B,
'id', 'id', 'attr', 'attr',
['attr'], ['attr'],
'ltable.', 'rtable.', 2),
expected_pairs)
# test filter attribute of type int
def test_jac_qg2_with_filter_attr_of_type_int(self):
A = pd.DataFrame([{'l_id': 1, 'l_attr':1990},
{'l_id': 2, 'l_attr':2000},
{'l_id': 3, 'l_attr':0},
{'l_id': 4, 'l_attr':-1},
{'l_id': 5, 'l_attr':1986}])
B = pd.DataFrame([{'r_id': 1, 'r_attr':2001},
{'r_id': 2, 'r_attr':1992},
{'r_id': 3, 'r_attr':1886},
{'r_id': 4, 'r_attr':2007},
{'r_id': 5, 'r_attr':2012}])
dataframe_column_to_str(A, 'l_attr', inplace=True)
dataframe_column_to_str(B, 'r_attr', inplace=True)
qg2_tok = QgramTokenizer(2, return_set=True)
expected_pairs = set(['1,1', '1,2', '1,3', '1,4', '1,5',
'2,1', '2,2', '2,3', '2,4', '2,5',
'5,1', '5,2', '5,3', '5,4', '5,5'])
self.test_filter_tables(qg2_tok, 'JACCARD', 0.8, False, False,
(A, B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
# tests for empty table input
def test_empty_ltable(self):
expected_pairs = set()
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.empty_table, self.B,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_empty_rtable(self):
expected_pairs = set()
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.A, self.empty_table,
'id', 'id', 'attr', 'attr'),
expected_pairs)
def test_empty_tables(self):
expected_pairs = set()
self.test_filter_tables(self.dlm, 'JACCARD', 0.8, False, False,
(self.empty_table, self.empty_table,
'id', 'id', 'attr', 'attr'),
expected_pairs)
@nottest
def test_filter_tables(self, tokenizer, sim_measure_type, threshold,
allow_empty, allow_missing, args, expected_pairs):
size_filter = SizeFilter(tokenizer, sim_measure_type, threshold,
allow_empty, allow_missing)
actual_candset = size_filter.filter_tables(*args)
expected_output_attrs = ['_id']
l_out_prefix = self.default_l_out_prefix
r_out_prefix = self.default_r_out_prefix
# Check for l_out_prefix in args.
if len(args) > 8:
l_out_prefix = args[8]
expected_output_attrs.append(l_out_prefix + args[2])
# Check for r_out_prefix in args.
if len(args) > 9:
r_out_prefix = args[9]
expected_output_attrs.append(r_out_prefix + args[3])
# Check for l_out_attrs in args.
if len(args) > 6:
if args[6]:
l_out_attrs = remove_redundant_attrs(args[6], args[2])
for attr in l_out_attrs:
expected_output_attrs.append(l_out_prefix + attr)
# Check for r_out_attrs in args.
if len(args) > 7:
if args[7]:
r_out_attrs = remove_redundant_attrs(args[7], args[3])
for attr in r_out_attrs:
expected_output_attrs.append(r_out_prefix + attr)
# verify whether the output table has the necessary attributes.
assert_list_equal(list(actual_candset.columns.values),
expected_output_attrs)
actual_pairs = set()
for idx, row in actual_candset.iterrows():
actual_pairs.add(','.join((str(row[l_out_prefix + args[2]]),
str(row[r_out_prefix + args[3]]))))
# verify whether the actual pairs and the expected pairs match.
assert_equal(len(expected_pairs), len(actual_pairs))
common_pairs = actual_pairs.intersection(expected_pairs)
assert_equal(len(common_pairs), len(expected_pairs))
# test SizeFilter.filter_candset method
class FilterCandsetTestCases(unittest.TestCase):
def setUp(self):
self.dlm = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.A = pd.DataFrame([{'l_id': 1, 'l_attr':'ab cd ef aa bb'},
{'l_id': 2, 'l_attr':''},
{'l_id': 3, 'l_attr':'ab'},
{'l_id': 4, 'l_attr':'ll oo pp'},
{'l_id': 5, 'l_attr':'xy xx zz fg'},
{'l_id': 6, 'l_attr': np.NaN}])
self.B = pd.DataFrame([{'r_id': 1, 'r_attr':'mn'},
{'r_id': 2, 'r_attr':'he ll'},
{'r_id': 3, 'r_attr':'xy pl ou'},
{'r_id': 4, 'r_attr':'aa'},
{'r_id': 5, 'r_attr':'fg cd aa ef'},
{'r_id': 6, 'r_attr':None}])
# generate cartesian product A x B to be used as candset
self.A['tmp_join_key'] = 1
self.B['tmp_join_key'] = 1
self.C = pd.merge(self.A[['l_id', 'tmp_join_key']],
self.B[['r_id', 'tmp_join_key']],
on='tmp_join_key').drop('tmp_join_key', 1)
self.empty_A = pd.DataFrame(columns=['l_id', 'l_attr'])
self.empty_B = pd.DataFrame(columns=['r_id', 'r_attr'])
self.empty_candset = pd.DataFrame(columns=['l_id', 'r_id'])
# tests for JACCARD measure
def test_jac_dlm_08(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5'])
self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, False,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
# tests for COSINE measure
def test_cos_dlm_08(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,2', '4,3',
'4,5', '5,3', '5,5'])
self.test_filter_candset(self.dlm, 'COSINE', 0.8, False, False,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
# tests for DICE measure
def test_dice_dlm_08(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,2', '4,3',
'4,5', '5,3', '5,5'])
self.test_filter_candset(self.dlm, 'DICE', 0.8, False, False,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
# test allow_missing flag
def test_jac_dlm_08_allow_missing(self):
expected_pairs = set(['1,5', '3,1', '3,4', '4,3', '5,5',
'6,1', '6,2', '6,3', '6,4', '6,5',
'6,6', '1,6', '2,6', '3,6', '4,6', '5,6'])
self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, True,
(self.C, 'l_id', 'r_id',
self.A, self.B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
# tests for empty candset input
def test_empty_candset(self):
expected_pairs = set()
self.test_filter_candset(self.dlm, 'JACCARD', 0.8, False, False,
(self.empty_candset, 'l_id', 'r_id',
self.empty_A, self.empty_B,
'l_id', 'r_id', 'l_attr', 'r_attr'),
expected_pairs)
@nottest
def test_filter_candset(self, tokenizer, sim_measure_type, threshold,
allow_empty, allow_missing, args, expected_pairs):
size_filter = SizeFilter(tokenizer, sim_measure_type, threshold,
allow_empty, allow_missing)
actual_output_candset = size_filter.filter_candset(*args)
# verify whether the output table has the necessary attributes.
assert_list_equal(list(actual_output_candset.columns.values),
list(args[0].columns.values))
actual_pairs = set()
for idx, row in actual_output_candset.iterrows():
actual_pairs.add(','.join((str(row[args[1]]), str(row[args[2]]))))
# verify whether the actual pairs and the expected pairs match.
assert_equal(len(expected_pairs), len(actual_pairs))
common_pairs = actual_pairs.intersection(expected_pairs)
assert_equal(len(common_pairs), len(expected_pairs))
class SizeFilterInvalidTestCases(unittest.TestCase):
def setUp(self):
self.A = pd.DataFrame([{'A.id':1, 'A.attr':'hello', 'A.int_attr':5}])
self.B = pd.DataFrame([{'B.id':1, 'B.attr':'world', 'B.int_attr':6}])
self.tokenizer = DelimiterTokenizer(delim_set=[' '], return_set=True)
self.sim_measure_type = 'JACCARD'
self.threshold = 0.8
@raises(TypeError)
def test_invalid_ltable(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables([], self.B, 'A.id', 'B.id',
'A.attr', 'B.attr')
@raises(TypeError)
def test_invalid_rtable(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, [], 'A.id', 'B.id',
'A.attr', 'B.attr')
@raises(AssertionError)
def test_invalid_l_key_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.invalid_id', 'B.id',
'A.attr', 'B.attr')
@raises(AssertionError)
def test_invalid_r_key_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.invalid_id',
'A.attr', 'B.attr')
@raises(AssertionError)
def test_invalid_l_filter_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
'A.invalid_attr', 'B.attr')
@raises(AssertionError)
def test_invalid_r_filter_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
'A.attr', 'B.invalid_attr')
@raises(AssertionError)
def test_numeric_l_filter_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
'A.int_attr', 'B.attr')
@raises(AssertionError)
def test_numeric_r_filter_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
'A.attr', 'B.int_attr')
@raises(AssertionError)
def test_invalid_l_out_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
'A.attr', 'B.attr',
['A.invalid_attr'], ['B.attr'])
@raises(AssertionError)
def test_invalid_r_out_attr(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type,
self.threshold)
size_filter.filter_tables(self.A, self.B, 'A.id', 'B.id',
'A.attr', 'B.attr',
['A.attr'], ['B.invalid_attr'])
@raises(TypeError)
def test_invalid_tokenizer(self):
size_filter = SizeFilter([], self.sim_measure_type, self.threshold)
@raises(AssertionError)
def test_invalid_tokenizer_for_edit_distance(self):
size_filter = SizeFilter(self.tokenizer, 'EDIT_DISTANCE', 2)
@raises(TypeError)
def test_invalid_sim_measure_type(self):
size_filter = SizeFilter(self.tokenizer, 'INVALID_TYPE', self.threshold)
@raises(AssertionError)
def test_invalid_threshold(self):
size_filter = SizeFilter(self.tokenizer, self.sim_measure_type, 1.2)
| bsd-3-clause |
ZENGXH/scikit-learn | sklearn/metrics/tests/test_classification.py | 42 | 52642 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_recall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 1, 1]
# |y3 union y4 | = [2, 1, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
        # Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
        # Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
        # Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
magne-max/zipline-ja | zipline/pipeline/loaders/utils.py | 1 | 9840 | import datetime
import numpy as np
import pandas as pd
from zipline.utils.pandas_utils import mask_between_time
def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all()
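# A tiny, self-checking sketch of the helper above; the sample arrays are
# arbitrary illustrative values and are not part of the zipline test suite.
def _example_is_sorted_ascending():
    assert is_sorted_ascending(np.array([1, 2, 2, 3]))
    # np.fmax.accumulate([1, 3, 2]) is [1, 3, 3], and 3 <= 2 fails at index 2.
    assert not is_sorted_ascending(np.array([1, 3, 2]))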
def validate_event_metadata(event_dates,
event_timestamps,
event_sids):
assert is_sorted_ascending(event_dates), "event dates must be sorted"
assert len(event_sids) == len(event_dates) == len(event_timestamps), \
"mismatched arrays: %d != %d != %d" % (
len(event_sids),
len(event_dates),
len(event_timestamps),
)
def next_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = all_dates.searchsorted(event_timestamps)
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
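# A minimal usage sketch for ``next_event_indexer``. The toy dates, sids and
# events below are assumed illustrative values, not part of the zipline API.
def _example_next_event_indexer():
    all_dates = pd.date_range('2014-01-01', periods=4).values
    all_sids = np.array([1, 2])
    event_dates = np.array(['2014-01-02', '2014-01-03'],
                           dtype='datetime64[ns]')
    event_timestamps = np.array(['2014-01-01', '2014-01-01'],
                                dtype='datetime64[ns]')
    event_sids = np.array([1, 2])
    # Column 0 (sid 1) holds index 0 through 2014-01-02, then -1; column 1
    # (sid 2) holds index 1 through 2014-01-03, then -1.
    return next_event_indexer(all_dates, all_sids,
                              event_dates, event_timestamps, event_sids)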
def previous_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = all_dates.searchsorted(eff_dts)
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
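# The counterpart sketch for ``previous_event_indexer``, using the same
# assumed toy inputs as above; again purely illustrative.
def _example_previous_event_indexer():
    all_dates = pd.date_range('2014-01-01', periods=4).values
    all_sids = np.array([1, 2])
    event_dates = np.array(['2014-01-02', '2014-01-03'],
                           dtype='datetime64[ns]')
    event_timestamps = np.array(['2014-01-01', '2014-01-01'],
                                dtype='datetime64[ns]')
    event_sids = np.array([1, 2])
    # Rows before max(event_date, event_timestamp) stay -1; from that row on,
    # each column points at the most recent event known for its sid.
    return previous_event_indexer(all_dates, all_sids,
                                  event_dates, event_timestamps, event_sids)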
def normalize_data_query_time(dt, time, tz):
"""Apply the correct time and timezone to a date.
Parameters
----------
dt : pd.Timestamp
The original datetime that represents the date.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
Returns
-------
query_dt : pd.Timestamp
The timestamp with the correct time and date in utc.
"""
# merge the correct date with the time in the given timezone then convert
# back to utc
return pd.Timestamp(
datetime.datetime.combine(dt.date(), time),
tz=tz,
).tz_convert('utc')
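# A small usage sketch for ``normalize_data_query_time``; the 8:45 US/Eastern
# cutoff is an arbitrary assumed value.
def _example_normalize_data_query_time():
    import pytz
    dt = pd.Timestamp('2014-01-02', tz='utc')
    # Combines the date 2014-01-02 with 08:45 US/Eastern and converts back to
    # utc, i.e. Timestamp('2014-01-02 13:45:00+0000', tz='UTC') in January.
    return normalize_data_query_time(dt, datetime.time(8, 45),
                                     pytz.timezone('US/Eastern'))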
def normalize_data_query_bounds(lower, upper, time, tz):
"""Adjust the first and last dates in the requested datetime index based on
the provided query time and tz.
    Parameters
    ----------
    lower : pd.Timestamp
The lower date requested.
upper : pd.Timestamp
The upper date requested.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
"""
# Subtract one day to grab things that happened on the first day we are
# requesting. This doesn't need to be a trading day, we are only adding
# a lower bound to limit the amount of in memory filtering that needs
# to happen.
lower -= datetime.timedelta(days=1)
if time is not None:
return normalize_data_query_time(
lower,
time,
tz,
), normalize_data_query_time(
upper,
time,
tz,
)
return lower, upper
_midnight = datetime.time(0, 0)
def normalize_timestamp_to_query_time(df,
time,
tz,
inplace=False,
ts_field='timestamp'):
"""Update the timestamp field of a dataframe to normalize dates around
some data query time/timezone.
Parameters
----------
df : pd.DataFrame
The dataframe to update. This needs a column named ``ts_field``.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
inplace : bool, optional
Update the dataframe in place.
ts_field : str, optional
The name of the timestamp field in ``df``.
Returns
-------
df : pd.DataFrame
The dataframe with the timestamp field normalized. If ``inplace`` is
true, then this will be the same object as ``df`` otherwise this will
be a copy.
"""
if not inplace:
# don't mutate the dataframe in place
df = df.copy()
dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
dtidx_local_time = dtidx.tz_convert(tz)
to_roll_forward = mask_between_time(
dtidx_local_time,
time,
_midnight,
include_end=False,
)
# For all of the times that are greater than our query time add 1
# day and truncate to the date.
# We normalize twice here because of a bug in pandas 0.16.1 that causes
# tz_localize() to shift some timestamps by an hour if they are not grouped
# together by DST/EST.
df.loc[to_roll_forward, ts_field] = (
dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)
).normalize().tz_localize(None).tz_localize('utc').normalize()
df.loc[~to_roll_forward, ts_field] = dtidx[~to_roll_forward].normalize()
return df
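# An illustrative sketch of ``normalize_timestamp_to_query_time`` with an
# assumed 8:45 US/Eastern cutoff; the two-row frame is made up for the example.
def _example_normalize_timestamp_to_query_time():
    import pytz
    df = pd.DataFrame({
        'sid': [1, 2],
        'timestamp': pd.to_datetime(['2014-01-02 12:00', '2014-01-02 23:00'],
                                    utc=True),
    })
    # 12:00 utc is 07:00 US/Eastern (before the cutoff), so it truncates to
    # 2014-01-02; 23:00 utc is 18:00 US/Eastern (after the cutoff), so it
    # rolls forward to 2014-01-03.
    return normalize_timestamp_to_query_time(df, datetime.time(8, 45),
                                             pytz.timezone('US/Eastern'))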
def check_data_query_args(data_query_time, data_query_tz):
"""Checks the data_query_time and data_query_tz arguments for loaders
and raises a standard exception if one is None and the other is not.
Parameters
----------
data_query_time : datetime.time or None
data_query_tz : tzinfo or None
Raises
------
ValueError
Raised when only one of the arguments is None.
"""
if (data_query_time is None) ^ (data_query_tz is None):
raise ValueError(
"either 'data_query_time' and 'data_query_tz' must both be"
" None or neither may be None (got %r, %r)" % (
data_query_time,
data_query_tz,
),
)
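# Minimal sketch of the validation above; the cutoff and timezone are assumed
# example values.
def _example_check_data_query_args():
    import pytz
    check_data_query_args(datetime.time(8, 45), pytz.utc)  # both set: fine
    check_data_query_args(None, None)                       # both None: fine
    try:
        check_data_query_args(datetime.time(8, 45), None)   # one-sided
    except ValueError:
        pass  # raised as documented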
| apache-2.0 |
swatlab/uplift-analysis | src_code_metrics.py | 1 | 4848 | import re, csv, pytz, json, subprocess
from dateutil import parser
import pandas as pd
import get_bugs
from libmozdata import patchanalysis
# Execute a shell command
def shellCommand(command_str):
    cmd = subprocess.Popen(command_str.split(' '), stdout=subprocess.PIPE)
cmd_out, cmd_err = cmd.communicate()
return cmd_out
def loadReleaseDate():
    print 'Loading Release date ...'
rel_date_list = list()
rel_list = list()
with open('complexity_sna/data/release2commit.csv') as f:
csvreader = csv.reader(f)
for row in csvreader:
rel_num = row[0]
rel_date = re.sub(r'[^0-9]', '', row[2])
rel_date_list.append([rel_date, rel_num])
rel_list.append(rel_num)
return rel_date_list, list(reversed(rel_list))
def loadCommitDate():
print 'Loading commit date ...'
commit_date_dict = dict()
with open('commit_date.csv') as f:
csvreader = csv.reader(f, delimiter='\t')
for row in csvreader:
commit_id = row[0]
raw_time = row[1]
datetime_obj = parser.parse(raw_time)
time_str = datetime_obj.astimezone(pytz.utc).strftime('%Y%m%d')
commit_date_dict[commit_id] = time_str
return commit_date_dict
def correspondingRelease(commit_id, commit_date_dict, rel_date_list):
if commit_id in commit_date_dict:
commit_date = commit_date_dict[commit_id]
else:
for key in commit_date_dict:
if commit_id in key:
commit_date = commit_date_dict[key]
for item in rel_date_list:
if commit_date >= item[0]:
return item[1]
return rel_date_list[-1][1]
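# A small illustrative sketch of how correspondingRelease() resolves a commit
# to its prior release; the dates and release numbers below are made up.
def _example_corresponding_release():
    rel_date_list = [['20160607', '47.0'], ['20160426', '46.0']]
    commit_date_dict = {'abc123': '20160501'}
    # '20160501' >= '20160426', so the commit maps to release '46.0'.
    return correspondingRelease('abc123', commit_date_dict, rel_date_list)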
def removePrefix(path):
return re.sub(r'^[\/\.]+', '', path)
def loadMetrics4Releases(category, release_list):
rel_metric_dict = dict()
metric_names = None
for rel in release_list:
metric_dict = dict()
metric_file = 'complexity_sna/code_metrics/%s-%s.csv' %(category, rel.replace('.', '_'))
with open(metric_file, 'r') as f:
csvreader = csv.reader(f)
metric_names = next(csvreader, None)[1:]
for line in csvreader:
key = removePrefix(line[0])
metric_dict[key] = line[1:]
rel_metric_dict[rel] = metric_dict
return rel_metric_dict, metric_names
def extractSourceCodeMetrics(rel_date_list, rel_list, commit_date_dict, category):
# load metrics
rel_metric_dict, metric_names = loadMetrics4Releases(category, rel_list)
# map and compute metric values
result_list = list()
i = 0
bugs = get_bugs.get_all()
for bug in bugs:
if DEBUG and i > 5:
break
bug_id = bug['id']
commits, _ = patchanalysis.get_commits_for_bug(bug)
print bug_id
# extract metrics
raw_list = list()
metric_list = list()
for commit_id in commits:
i += 1
if DEBUG:
print ' ', commit_id
# corresponding (prior) release of a commit
rel_num = correspondingRelease(commit_id, commit_date_dict, rel_date_list)
# changed files in a commit
shell_res = shellCommand('hg -R %s log -r %s --template {files}\t{diffstat}' %(HG_REPO_PATH, commit_id)).split('\t')
raw_changed_files = shell_res[0]
cpp_changed_files = re.findall(r'(\S+\.(?:c|cpp|cc|cxx|h|hpp|hxx)\b)', raw_changed_files)
# map file/node to metrics
for a_file in cpp_changed_files:
metric_dict = rel_metric_dict[rel_num]
for node in metric_dict:
if node in a_file:
metrics = metric_dict[node]
raw_list.append(metrics)
# compute average/sum value for a specific attachment
if len(raw_list):
df = pd.DataFrame(raw_list, columns=metric_names).apply(pd.to_numeric)
for metric_name in metric_names:
metric_list.append(round(df[metric_name].mean(), 2))
result_list.append([bug_id] + metric_list)
else:
result_list.append([bug_id] + [0]*len(metric_names))
return pd.DataFrame(result_list, columns=['bug_id']+metric_names)
if __name__ == '__main__':
DEBUG = False
HG_REPO_PATH = '../firefox/'
# load data
rel_date_list, rel_list = loadReleaseDate()
commit_date_dict = loadCommitDate()
# extract metrics
df_complexity = extractSourceCodeMetrics(rel_date_list, rel_list, commit_date_dict, 'complexity')
df_sna = extractSourceCodeMetrics(rel_date_list, rel_list, commit_date_dict, 'sna')
df_code = pd.merge(df_complexity, df_sna, on='bug_id')
df_code.to_csv('independent_metrics/src_code_metrics.csv', index=False)
if DEBUG:
print df_code
| mpl-2.0 |
rdipietro/tensorflow | tensorflow/contrib/learn/python/learn/estimators/classifier_test.py | 16 | 5175 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.session_bundle import manifest_pb2
def iris_input_fn(num_epochs=None):
iris = tf.contrib.learn.datasets.load_iris()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(iris.data), [-1, 4]), num_epochs=num_epochs)
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def logistic_model_fn(features, labels, unused_mode):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_params_fn(features, labels, unused_mode, params):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
self._runIrisAll(est)
def testIrisAllWithParams(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_params_fn,
n_classes=3,
params={'learning_rate': 0.01})
self._runIrisAll(est)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predict_input_fn = functools.partial(iris_input_fn, num_epochs=1)
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEqual(len(predictions), iris.target.shape[0])
def _runIrisAll(self, est):
iris = tf.contrib.learn.datasets.load_iris()
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = list(est.predict(x=iris.data))
predictions_proba = list(est.predict_proba(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
# Disable this test case until b/31032996 is fixed.
def _testExportMonitorRegressionSignature(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=tf.contrib.learn.classifier.classification_signature_fn)
est.fit(iris.data, iris.target, steps=2, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000002/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000002/export.meta')
self.assertTrue(signature.HasField('classification_signature'))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
rs2/pandas | pandas/tests/indexing/multiindex/test_xs.py | 1 | 9100 | import numpy as np
import pytest
from pandas import DataFrame, Index, IndexSlice, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@pytest.fixture
def four_level_index_dataframe():
arr = np.array(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
]
)
index = MultiIndex(
levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
names=["one", "two", "three", "four"],
)
return DataFrame(arr, index=index, columns=list("ABCDE"))
@pytest.mark.parametrize(
"key, level, exp_arr, exp_index",
[
("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
],
)
def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
# see gh-2903
arr = np.random.randn(4, 4)
index = MultiIndex(
levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
names=["lvl0", "lvl1"],
)
df = DataFrame(arr, columns=index)
result = df.xs(key, level=level, axis=1)
expected = DataFrame(exp_arr(arr), columns=exp_index)
tm.assert_frame_equal(result, expected)
def test_xs_values(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two")).values
expected = df.values[4]
tm.assert_almost_equal(result, expected)
def test_xs_loc_equality(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two"))
expected = df.loc[("bar", "two")]
tm.assert_series_equal(result, expected)
def test_xs_missing_values_in_index():
# see gh-6574
# missing values in returned index should be preserved
acc = [
("a", "abcde", 1),
("b", "bbcde", 2),
("y", "yzcde", 25),
("z", "xbcde", 24),
("z", None, 26),
("z", "zbcde", 25),
("z", "ybcde", 26),
]
df = DataFrame(acc, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
expected = DataFrame(
{"cnt": [24, 26, 25, 26]},
index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
)
result = df.xs("z", level="a1")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):
# see gh-13719
frame = multiindex_dataframe_random_data
df = concat([frame] * 2)
assert df.index.is_unique is False
expected = concat([frame.xs("one", level="second")] * 2)
result = df.xs(key, level=level)
tm.assert_frame_equal(result, expected)
def test_xs_level(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
expected = df[df.index.get_level_values(1) == "two"]
expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
tm.assert_frame_equal(result, expected)
def test_xs_level_eq_2():
arr = np.random.randn(3, 5)
index = MultiIndex(
levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
)
df = DataFrame(arr, index=index)
expected = DataFrame(arr[1:2], index=[["a"], ["b"]])
result = df.xs("c", level=2)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer",
[
lambda df: df.xs(("a", 4), level=["one", "four"]),
lambda df: df.xs("a").xs(4, level="four"),
],
)
def test_xs_level_multiple(indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]
expected_index = MultiIndex(
levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
)
expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_xs_setting_with_copy_error(multiindex_dataframe_random_data):
# this is a copy in 0.14
df = multiindex_dataframe_random_data
result = df.xs("two", level="second")
# setting this will give a SettingWithCopyError
# as we are trying to write a view
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
result[:] = 10
def test_xs_setting_with_copy_error_multiple(four_level_index_dataframe):
# this is a copy in 0.14
df = four_level_index_dataframe
result = df.xs(("a", 4), level=["one", "four"])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
msg = "A value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
result[:] = 10
def test_xs_integer_key():
# see gh-2107
dates = range(20111201, 20111205)
ids = list("abcde")
index = MultiIndex.from_product([dates, ids], names=["date", "secid"])
df = DataFrame(np.random.randn(len(index), 3), index, ["X", "Y", "Z"])
result = df.xs(20111201, level="date")
expected = df.loc[20111201, :]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
)
def test_xs_level0(indexer, four_level_index_dataframe):
df = four_level_index_dataframe
expected_values = [
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
]
expected_index = MultiIndex(
levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
codes=[[0, 1], [0, 1], [1, 0]],
names=["two", "three", "four"],
)
expected = DataFrame(expected_values, index=expected_index, columns=list("ABCDE"))
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_xs_level_series(multiindex_dataframe_random_data):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
df = multiindex_dataframe_random_data
s = df["A"]
result = s[:, "two"]
expected = df.xs("two", level=1)["A"]
tm.assert_series_equal(result, expected)
def test_xs_level_series_ymd(multiindex_year_month_day_dataframe_random_data):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
df = multiindex_year_month_day_dataframe_random_data
s = df["A"]
result = s[2000, 5]
expected = df.loc[2000, 5]["A"]
tm.assert_series_equal(result, expected)
def test_xs_level_series_slice_not_implemented(
multiindex_year_month_day_dataframe_random_data,
):
# this test is not explicitly testing .xs functionality
# TODO: move to another module or refactor
# not implementing this for now
df = multiindex_year_month_day_dataframe_random_data
s = df["A"]
msg = r"\(2000, slice\(3, 4, None\)\)"
with pytest.raises(TypeError, match=msg):
s[2000, 3:4]
def test_xs_IndexSlice_argument_not_implemented():
# GH 35301
index = MultiIndex(
levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
)
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
msg = (
"Expected label or tuple of labels, got "
r"\(\('foo', 'qux', 0\), slice\(None, None, None\)\)"
)
with pytest.raises(TypeError, match=msg):
frame.xs(IndexSlice[("foo", "qux", 0), :])
with pytest.raises(TypeError, match=msg):
series.xs(IndexSlice[("foo", "qux", 0), :])
def test_series_getitem_multiindex_xs():
# GH6258
dt = list(date_range("20130903", periods=3))
idx = MultiIndex.from_product([list("AB"), dt])
s = Series([1, 3, 4, 1, 3, 4], index=idx)
expected = Series([1, 1], index=list("AB"))
result = s.xs("20130903", level=1)
tm.assert_series_equal(result, expected)
def test_series_getitem_multiindex_xs_by_label():
# GH5684
idx = MultiIndex.from_tuples(
[("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
)
s = Series([1, 2, 3, 4], index=idx)
return_value = s.index.set_names(["L1", "L2"], inplace=True)
assert return_value is None
expected = Series([1, 3], index=["a", "b"])
return_value = expected.index.set_names(["L1"], inplace=True)
assert return_value is None
result = s.xs("one", level="L2")
tm.assert_series_equal(result, expected)
def test_xs_levels_raises():
df = DataFrame({"A": [1, 2, 3]})
msg = "Index must be a MultiIndex"
with pytest.raises(TypeError, match=msg):
df.xs(0, level="as")
s = df.A
with pytest.raises(TypeError, match=msg):
s.xs(0, level="as")
| bsd-3-clause |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/multioutput_test.py | 5 | 1679 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
class MultiOutputTest(tf.test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.TensorFlowLinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
learning_rate=0.01, target_dimension=2)
regressor.fit(x, y)
score = mean_squared_error(regressor.predict(x), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| mit |
timmie/cartopy | lib/cartopy/mpl/ticker.py | 3 | 10493 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""This module contains tools for handling tick marks in cartopy."""
from __future__ import (absolute_import, division, print_function)
from matplotlib.ticker import Formatter
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
class _PlateCarreeFormatter(Formatter):
"""
Base class for formatting ticks on geographical axes using a
rectangular projection (e.g. Plate Carree, Mercator).
"""
_target_projection = ccrs.PlateCarree()
def __init__(self, degree_symbol=u'\u00B0', number_format='g',
transform_precision=1e-8):
"""
Base class for simpler implementation of specialised formatters
for latitude and longitude axes.
"""
self._degree_symbol = degree_symbol
self._number_format = number_format
self._transform_precision = transform_precision
def __call__(self, value, pos=None):
if not isinstance(self.axis.axes, GeoAxes):
raise TypeError("This formatter can only be "
"used with cartopy axes.")
# We want to produce labels for values in the familiar Plate Carree
# projection, so convert the tick values from their own projection
# before formatting them.
source = self.axis.axes.projection
if not isinstance(source, (ccrs._RectangularProjection,
ccrs.Mercator)):
raise TypeError("This formatter cannot be used with "
"non-rectangular projections.")
projected_value = self._apply_transform(value, self._target_projection,
source)
# Round the transformed value using a given precision for display
# purposes. Transforms can introduce minor rounding errors that make
# the tick values look bad, these need to be accounted for.
f = 1. / self._transform_precision
projected_value = round(f * projected_value) / f
# Return the formatted values, the formatter has both the re-projected
# tick value and the original axis value available to it.
return self._format_value(projected_value, value)
def _format_value(self, value, original_value):
hemisphere = self._hemisphere(value, original_value)
fmt_string = u'{value:{number_format}}{degree}{hemisphere}'
return fmt_string.format(value=abs(value),
number_format=self._number_format,
degree=self._degree_symbol,
hemisphere=hemisphere)
def _apply_transform(self, value, target_proj, source_crs):
"""
Given a single value, a target projection and a source CRS,
transforms the value from the source CRS to the target
projection, returning a single value.
"""
raise NotImplementedError("A subclass must implement this method.")
def _hemisphere(self, value, value_source_crs):
"""
Given both a tick value in the Plate Carree projection and the
same value in the source CRS returns a string indicating the
hemisphere that the value is in.
Must be over-ridden by the derived class.
"""
raise NotImplementedError("A subclass must implement this method.")
class LatitudeFormatter(_PlateCarreeFormatter):
"""Tick formatter for latitude axes."""
def __init__(self, degree_symbol=u'\u00B0', number_format='g',
transform_precision=1e-8):
"""
Tick formatter for a latitude axis.
The axis must be part of an axes defined on a rectangular
projection (e.g. Plate Carree, Mercator).
.. note::
A formatter can only be used for one axis. A new formatter
must be created for every axis that needs formatted labels.
Kwargs:
* degree_symbol (string):
The character(s) used to represent the degree symbol in the
tick labels. Defaults to u'\u00B0' which is the unicode
degree symbol. Can be an empty string if no degree symbol is
desired.
* number_format (string):
Format string to represent the tick values. Defaults to 'g'.
* transform_precision (float):
Sets the precision (in degrees) to which transformed tick
            values are rounded. The default is 1e-8, and should be
suitable for most use cases. To control the appearance of
tick labels use the *number_format* keyword.
Examples:
Label latitudes from -90 to 90 on a Plate Carree projection::
ax = plt.axes(projection=PlateCarree())
ax.set_global()
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90],
crs=ccrs.PlateCarree())
lat_formatter = LatitudeFormatter()
ax.yaxis.set_major_formatter(lat_formatter)
Label latitudes from -80 to 80 on a Mercator projection, this
time omitting the degree symbol::
ax = plt.axes(projection=Mercator())
ax.set_global()
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90],
crs=ccrs.PlateCarree())
lat_formatter = LatitudeFormatter(degree_symbol='')
ax.yaxis.set_major_formatter(lat_formatter)
"""
super(LatitudeFormatter, self).__init__(
degree_symbol=degree_symbol,
number_format=number_format,
transform_precision=transform_precision)
def _apply_transform(self, value, target_proj, source_crs):
return target_proj.transform_point(0, value, source_crs)[1]
def _hemisphere(self, value, value_source_crs):
if value > 0:
hemisphere = 'N'
elif value < 0:
hemisphere = 'S'
else:
hemisphere = ''
return hemisphere
class LongitudeFormatter(_PlateCarreeFormatter):
"""Tick formatter for a longitude axis."""
def __init__(self,
zero_direction_label=False,
dateline_direction_label=False,
degree_symbol=u'\u00B0',
number_format='g',
transform_precision=1e-8):
"""
Create a formatter for longitude values.
The axis must be part of an axes defined on a rectangular
projection (e.g. Plate Carree, Mercator).
.. note::
A formatter can only be used for one axis. A new formatter
must be created for every axis that needs formatted labels.
Kwargs:
* zero_direction_label (False | True):
If *True* a direction label (E or W) will be drawn next to
longitude labels with the value 0. If *False* then these
labels will not be drawn. Defaults to *False* (no direction
labels).
* dateline_direction_label (False | True):
If *True* a direction label (E or W) will be drawn next to
longitude labels with the value 180. If *False* then these
labels will not be drawn. Defaults to *False* (no direction
labels).
* degree_symbol (string):
The symbol used to represent degrees. Defaults to u'\u00B0'
which is the unicode degree symbol.
* number_format (string):
Format string to represent the longitude values. Defaults to
'g'.
* transform_precision (float):
Sets the precision (in degrees) to which transformed tick
            values are rounded. The default is 1e-8, and should be
suitable for most use cases. To control the appearance of
tick labels use the *number_format* keyword.
Examples:
Label longitudes from -180 to 180 on a Plate Carree projection
with a central longitude of 0::
ax = plt.axes(projection=PlateCarree())
ax.set_global()
ax.set_xticks([-180, -120, -60, 0, 60, 120, 180],
crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
Label longitudes from 0 to 360 on a Plate Carree projection
with a central longitude of 180::
ax = plt.axes(projection=PlateCarree(central_longitude=180))
ax.set_global()
ax.set_xticks([0, 60, 120, 180, 240, 300, 360],
crs=ccrs.PlateCarree())
            lon_formatter = LongitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
"""
super(LongitudeFormatter, self).__init__(
degree_symbol=degree_symbol,
number_format=number_format,
transform_precision=transform_precision)
self._zero_direction_labels = zero_direction_label
self._dateline_direction_labels = dateline_direction_label
def _apply_transform(self, value, target_proj, source_crs):
return target_proj.transform_point(value, 0, source_crs)[0]
def _hemisphere(self, value, value_source_crs):
# Perform basic hemisphere detection.
if value < 0:
hemisphere = 'W'
elif value > 0:
hemisphere = 'E'
else:
hemisphere = ''
# Correct for user preferences:
if value == 0 and self._zero_direction_labels:
# Use the original tick value to determine the hemisphere.
if value_source_crs < 0:
hemisphere = 'E'
else:
hemisphere = 'W'
if value in (-180, 180) and not self._dateline_direction_labels:
hemisphere = ''
return hemisphere
| gpl-3.0 |
ifcharming/voltdb2.1 | tools/vis.py | 1 | 5697 | #!/usr/bin/env python
# This is a visualizer which pulls benchmark results from the statistics
# server (a VoltDB instance, see STATS_SERVER below) and visualizes them.
# Latency and throughput graphs are generated for single-node, 3-node and
# 6-node runs.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
STATS_SERVER = 'volt2'
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
def get_stats(hostname, port, days):
"""Get statistics of all runs
Example return value:
{ u'VoltKV': [ { 'lat95': 21,
'lat99': 35,
'nodes': 1,
'throughput': 104805,
'date': datetime object}],
u'Voter': [ { 'lat95': 20,
'lat99': 47,
'nodes': 1,
'throughput': 66287,
'date': datetime object}]}
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'BestOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
stats = dict()
run_stat_keys = ['nodes', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
app_stats = []
if row[0] not in stats:
stats[row[0]] = app_stats
else:
app_stats = stats[row[0]]
run_stats = dict(zip(run_stat_keys, row[1:]))
app_stats.append(run_stats)
# sort each one
for app_stats in stats.itervalues():
app_stats.sort(key=lambda x: x['date'])
return stats
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h):
self.filename = filename
self.legends = {}
w = w == None and 800 or w
h = h == None and 300 or h
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.ylabel(ylabel, fontsize=8)
plt.xlabel(xlabel, fontsize=8)
fig.autofmt_xdate()
def plot(self, x, y, color, marker_shape, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend),
marker=marker_shape, markerfacecolor=color, markersize=4)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d")
self.ax.xaxis.set_major_formatter(formatter)
plt.legend(prop={'size': 10}, loc=0)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
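# Minimal usage sketch of the Plot helper (illustrative only; the file name and
# data points are made up, x values are matplotlib date numbers):
# pl = Plot('Demo', 'Time', 'Throughput (txns/sec)', 'demo.png', 800, 300)
# pl.plot([735000, 735001], [1000.0, 1200.0], COLORS(0), MARKERS[0], 'app')
# pl.close()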
def plot(title, xlabel, ylabel, filename, nodes, width, height, data,
data_type):
plot_data = dict()
for app, runs in data.iteritems():
for v in runs:
if v['nodes'] != nodes:
continue
if app not in plot_data:
plot_data[app] = {'time': [], data_type: []}
datenum = matplotlib.dates.date2num(v['date'])
plot_data[app]['time'].append(datenum)
if data_type == 'tps':
value = v['tps']/v['nodes']
else:
value = v[data_type]
plot_data[app][data_type].append(value)
if len(plot_data) == 0:
return
i = 0
pl = Plot(title, xlabel, ylabel, filename, width, height)
sorted_data = sorted(plot_data.items(), key=lambda x: x[0])
for k, v in sorted_data:
pl.plot(v['time'], v[data_type], COLORS(i), MARKERS[i], k)
i += 3
pl.close()
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
        print sys.argv[1], "does not exist"
exit(-1)
path = os.path.join(sys.argv[1], sys.argv[2])
width = None
height = None
if len(sys.argv) >= 4:
width = int(sys.argv[3])
if len(sys.argv) >= 5:
height = int(sys.argv[4])
stats = get_stats(STATS_SERVER, 21212, 30)
# Plot single node stats for all apps
plot("Average Latency on Single Node", "Time", "Latency (ms)",
path + "-latency-single.png", 1, width, height, stats, 'lat99')
plot("Single Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-single.png", 1, width, height, stats, 'tps')
# Plot 3 node stats for all apps
plot("Average Latency on 3 Nodes", "Time", "Latency (ms)",
path + "-latency-3.png", 3, width, height, stats, 'lat99')
plot("3 Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-3.png", 3, width, height, stats, 'tps')
# Plot 6 node stats for all apps
plot("Average Latency on 6 Node", "Time", "Latency (ms)",
path + "-latency-6.png", 6, width, height, stats, 'lat99')
plot("6 Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-6.png", 6, width, height, stats, 'tps')
if __name__ == "__main__":
main()
| gpl-3.0 |
gdetor/SI-RF-Structure | Statistics/clear_data.py | 1 | 5369 | # Copyright (c) 2014, Georgios Is. Detorakis (gdetor@gmail.com) and
# Nicolas P. Rougier (nicolas.rougier@inria.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script applies all the filters and cleaning techniques to the ncRFs. You
# have to use this script before any further statistical analysis to the data.
import numpy as np
from matplotlib import rc
import matplotlib.pylab as plt
from scipy.stats.stats import pearsonr
from scipy.stats.mstats import gmean
from scipy.ndimage import gaussian_filter
def locate_noise( input ):
n = input.shape[0]
data = input.copy()
count = 0
for i in range( 1,n-1 ):
for j in range( 1,n-1 ):
if data[i,j] != 0:
if data[i+1,j] != 0 and np.sign(data[i+1,j])==np.sign(data[i,j]):
count += 1
if data[i-1,j] != 0 and np.sign(data[i-1,j])==np.sign(data[i,j]):
count += 1
if data[i,j-1] != 0 and np.sign(data[i,j-1])==np.sign(data[i,j]):
count += 1
if data[i,j+1] != 0 and np.sign(data[i,j+1])==np.sign(data[i,j]):
count += 1
if count < 2:
data[i,j] = 0
count = 0
return data
# Computing the area of the receptive fields according to Dicarlo's
# protocol described in article "Structure of Receptive Fields in area 3b...
def clear_data( RFs, n ):
p = 25
Z, T = [], []
Noise = np.load( 'noise.npy' ).reshape(n*n,p,p)
cRFs = np.zeros((n*n,p,p))
for i in range( n ):
for j in range( n ):
RF = RFs[i,j,...]
# WARNING : Centering the RF
s0,s1 = np.unravel_index(np.argmax(RF),RF.shape)
RF = np.roll(RF,13-s0,axis=0)
RF = np.roll(RF,13-s1,axis=1)
# WARNING : Centering the RF
# RF += Noise[i*n+j]
# RF = gaussian_filter( RF, sigma=2.2 )
RF += 1.5*Noise[i*n+j]
RF = gaussian_filter( RF, sigma=1.5 )
abs_max = np.max( np.abs( RF ) )
RF[np.where( ( ( RF < +0.10*abs_max ) & (RF>0) ) | ( ( RF > -0.10*abs_max ) & (RF < 0) ) ) ]=0
RF = locate_noise( RF )
cRFs[i*n+j,...] = RF
exc = 50.0 * ( RF > 0).sum()/( p * p )
inh = 50.0 * ( RF < 0).sum()/( p * p )
Z.append([exc,inh])
Z = np.array(Z)
np.nan_to_num(Z)
print '------ Excitatory ------- Inhibitory -------'
print 'Minimum :', Z[:,0].min(), Z[:,1].min()
print 'Maximum :', Z[:,0].max(), Z[:,1].max()
print 'Mean :', np.mean( Z[:,0] ), np.mean( Z[:,1] )
    print 'Log Mean:', np.mean( np.log10(Z[:,0]) ), np.mean( np.log10(Z[:,1]) )
print 'SD : ', np.std( np.log10(Z[:,0]) ), np.std( np.log10(Z[:,1]) )
print 'GMean :', gmean( Z[:,0] ), gmean( Z[:,1] )
print "Pearson cor: ", pearsonr( Z[:,0], np.abs(Z[:,1]) )
return Z, cRFs
# Computing the SNR of the receptive fields.
def snr( signal, sigma ):
k = signal.shape[0]
# Filtering the input signal
filtered_s = gaussian_filter( signal, sigma )
# Computing background noise
noise = signal - filtered_s
# Computing noise variance
noise_var = np.var( noise )
# Computing signal and noise power
signalPow = np.sum( signal**2 )/k
noisePow = np.sum( noise**2 )/k
# Computing snr and noise index
snr = 10.0 * np.log10( signalPow/noisePow )
noise_index = noise_var/np.abs(signal).max() *100.0
return snr, noise_index, filtered_s
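# Illustrative sketch (synthetic data, not part of the original analysis): a
# smooth signal with weak additive noise should yield a high SNR in dB and a
# small noise index (a percentage of the absolute maximum).
# smooth = np.outer(np.sin(np.linspace(0, np.pi, 25)), np.ones(25))
# noisy = smooth + 0.01 * np.random.randn(25, 25)
# snr_db, noise_index, filtered = snr(noisy, sigma=1.5)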
# Main :p
if __name__=='__main__':
np.random.seed(137)
RFs = np.load('real-rfs-ref.npy').reshape(32,32,25,25)
n, size, bins = RFs.shape[0], RFs.shape[2], 70
Z, cRFs = clear_data( RFs, n )
np.save('areas-ref', Z)
np.save('cleared-rfs', cRFs)
| gpl-3.0 |
cheral/orange3 | doc/development/source/orange-demo/orangedemo/OWLearningCurveB.py | 2 | 13882 | import sys
from collections import OrderedDict
from functools import reduce
import numpy
import sklearn.cross_validation
from PyQt4.QtGui import QTableWidget, QTableWidgetItem
import Orange.data
import Orange.classification
from Orange.widgets import widget, gui, settings
from Orange.evaluation.testing import Results
class OWLearningCurveB(widget.OWWidget):
name = "Learning Curve (B)"
description = ("Takes a data set and a set of learners and shows a "
"learning curve in a table")
icon = "icons/LearningCurve.svg"
priority = 1010
# [start-snippet-1]
inputs = [("Data", Orange.data.Table, "set_dataset", widget.Default),
("Test Data", Orange.data.Table, "set_testdataset"),
("Learner", Orange.classification.Learner, "set_learner",
widget.Multiple + widget.Default)]
# [end-snippet-1]
#: cross validation folds
folds = settings.Setting(5)
#: points in the learning curve
steps = settings.Setting(10)
#: index of the selected scoring function
scoringF = settings.Setting(0)
#: compute curve on any change of parameters
commitOnChange = settings.Setting(True)
def __init__(self):
super().__init__()
# sets self.curvePoints, self.steps equidistant points from
# 1/self.steps to 1
self.updateCurvePoints()
self.scoring = [
("Classification Accuracy", Orange.evaluation.scoring.CA),
("AUC", Orange.evaluation.scoring.AUC),
("Precision", Orange.evaluation.scoring.Precision),
("Recall", Orange.evaluation.scoring.Recall)
]
#: input data on which to construct the learning curve
self.data = None
#: optional test data
self.testdata = None
#: A {input_id: Learner} mapping of current learners from input channel
self.learners = OrderedDict()
#: A {input_id: List[Results]} mapping of input id to evaluation
#: results list, one for each curve point
self.results = OrderedDict()
#: A {input_id: List[float]} mapping of input id to learning curve
#: point scores
self.curves = OrderedDict()
# GUI
box = gui.widgetBox(self.controlArea, "Info")
self.infoa = gui.widgetLabel(box, 'No data on input.')
self.infob = gui.widgetLabel(box, 'No learners.')
gui.separator(self.controlArea)
box = gui.widgetBox(self.controlArea, "Evaluation Scores")
gui.comboBox(box, self, "scoringF",
items=[x[0] for x in self.scoring],
callback=self._invalidate_curves)
gui.separator(self.controlArea)
box = gui.widgetBox(self.controlArea, "Options")
gui.spin(box, self, 'folds', 2, 100, step=1,
label='Cross validation folds: ', keyboardTracking=False,
callback=lambda:
self._invalidate_results() if self.commitOnChange else None
)
gui.spin(box, self, 'steps', 2, 100, step=1,
label='Learning curve points: ', keyboardTracking=False,
callback=[self.updateCurvePoints,
lambda: self._invalidate_results() if self.commitOnChange else None])
gui.checkBox(box, self, 'commitOnChange', 'Apply setting on any change')
self.commitBtn = gui.button(box, self, "Apply Setting",
callback=self._invalidate_results,
disabled=True)
gui.rubber(self.controlArea)
# table widget
self.table = gui.table(self.mainArea,
selectionMode=QTableWidget.NoSelection)
##########################################################################
# slots: handle input signals
def set_dataset(self, data):
"""Set the input train dataset."""
# Clear all results/scores
for id in list(self.results):
self.results[id] = None
for id in list(self.curves):
self.curves[id] = None
self.data = data
if data is not None:
self.infoa.setText('%d instances in input data set' % len(data))
else:
self.infoa.setText('No data on input.')
self.commitBtn.setEnabled(self.data is not None)
def set_testdataset(self, testdata):
"""Set a separate test dataset."""
# Clear all results/scores
for id in list(self.results):
self.results[id] = None
for id in list(self.curves):
self.curves[id] = None
self.testdata = testdata
def set_learner(self, learner, id):
"""Set the input learner for channel id."""
if id in self.learners:
if learner is None:
# remove a learner and corresponding results
del self.learners[id]
del self.results[id]
del self.curves[id]
else:
# update/replace a learner on a previously connected link
self.learners[id] = learner
# invalidate the cross-validation results and curve scores
# (will be computed/updated in `_update`)
self.results[id] = None
self.curves[id] = None
else:
if learner is not None:
self.learners[id] = learner
# initialize the cross-validation results and curve scores
# (will be computed/updated in `_update`)
self.results[id] = None
self.curves[id] = None
if len(self.learners):
self.infob.setText("%d learners on input." % len(self.learners))
else:
self.infob.setText("No learners.")
self.commitBtn.setEnabled(len(self.learners))
def handleNewSignals(self):
if self.data is not None:
self._update()
self._update_curve_points()
self._update_table()
def _invalidate_curves(self):
if self.data is not None:
self._update_curve_points()
self._update_table()
def _invalidate_results(self):
for id in self.learners:
self.curves[id] = None
self.results[id] = None
if self.data is not None:
self._update()
self._update_curve_points()
self._update_table()
def _update(self):
assert self.data is not None
# collect all learners for which results have not yet been computed
need_update = [(id, learner) for id, learner in self.learners.items()
if self.results[id] is None]
if not need_update:
return
learners = [learner for _, learner in need_update]
self.progressBarInit()
if self.testdata is None:
# compute the learning curve result for all learners in one go
results = learning_curve(
learners, self.data, folds=self.folds,
proportions=self.curvePoints,
callback=lambda value: self.progressBarSet(100 * value)
)
else:
results = learning_curve_with_test_data(
learners, self.data, self.testdata, times=self.folds,
proportions=self.curvePoints,
callback=lambda value: self.progressBarSet(100 * value)
)
self.progressBarFinished()
# split the combined result into per learner/model results
results = [list(Results.split_by_model(p_results)) for p_results in results]
for i, (id, learner) in enumerate(need_update):
self.results[id] = [p_results[i] for p_results in results]
def _update_curve_points(self):
for id in self.learners:
curve = [self.scoring[self.scoringF][1](x)[0]
for x in self.results[id]]
self.curves[id] = curve
def _update_table(self):
self.table.setRowCount(0)
self.table.setRowCount(len(self.curvePoints))
self.table.setColumnCount(len(self.learners))
self.table.setHorizontalHeaderLabels(
[learner.name for _, learner in self.learners.items()])
self.table.setVerticalHeaderLabels(
["{:.2f}".format(p) for p in self.curvePoints])
if self.data is None:
return
for column, curve in enumerate(self.curves.values()):
for row, point in enumerate(curve):
self.table.setItem(
row, column, QTableWidgetItem("{:.5f}".format(point)))
for i in range(len(self.learners)):
sh = self.table.sizeHintForColumn(i)
cwidth = self.table.columnWidth(i)
self.table.setColumnWidth(i, max(sh, cwidth))
def updateCurvePoints(self):
self.curvePoints = [(x + 1.)/self.steps for x in range(self.steps)]
def learning_curve(learners, data, folds=10, proportions=None,
random_state=None, callback=None):
if proportions is None:
proportions = numpy.linspace(0.0, 1.0, 10 + 1, endpoint=True)[1:]
def select_proportion_preproc(data, p, rstate=None):
assert 0 < p <= 1
rstate = numpy.random.RandomState(None) if rstate is None else rstate
indices = rstate.permutation(len(data))
n = int(numpy.ceil(len(data) * p))
return data[indices[:n]]
if callback is not None:
parts_count = len(proportions)
callback_wrapped = lambda part: \
lambda value: callback(value / parts_count + part / parts_count)
else:
callback_wrapped = lambda part: None
results = [
Orange.evaluation.CrossValidation(
data, learners, k=folds,
preprocessor=lambda data, p=p:
select_proportion_preproc(data, p),
callback=callback_wrapped(i)
)
for i, p in enumerate(proportions)
]
return results
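# Minimal usage sketch (illustrative only; assumes Orange's bundled "iris"
# dataset is available):
# data = Orange.data.Table("iris")
# learners = [Orange.classification.NaiveBayesLearner()]
# results = learning_curve(learners, data, folds=3, proportions=[0.2, 0.5, 1.0])
# curve = [Orange.evaluation.scoring.CA(res)[0] for res in results]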
def learning_curve_with_test_data(learners, traindata, testdata, times=10,
proportions=None, random_state=None,
callback=None):
if proportions is None:
proportions = numpy.linspace(0.0, 1.0, 10 + 1, endpoint=True)[1:]
def select_proportion_preproc(data, p, rstate=None):
assert 0 < p <= 1
rstate = numpy.random.RandomState(None) if rstate is None else rstate
indices = rstate.permutation(len(data))
n = int(numpy.ceil(len(data) * p))
return data[indices[:n]]
if callback is not None:
parts_count = len(proportions) * times
callback_wrapped = lambda part: \
lambda value: callback(value / parts_count + part / parts_count)
else:
callback_wrapped = lambda part: None
results = [
[Orange.evaluation.TestOnTestData(
traindata, testdata, learners,
preprocessor=lambda data, p=p:
select_proportion_preproc(data, p),
callback=callback_wrapped(i * times + t))
for t in range(times)]
for i, p in enumerate(proportions)
]
results = [reduce(results_add, res, Orange.evaluation.Results())
for res in results]
return results
def results_add(x, y):
def is_empty(res):
return (getattr(res, "models", None) is None
and getattr(res, "row_indices", None) is None)
if is_empty(x):
return y
elif is_empty(y):
return x
assert x.data is y.data
assert x.domain is y.domain
assert x.predicted.shape[0] == y.predicted.shape[0]
row_indices = numpy.hstack((x.row_indices, y.row_indices))
predicted = numpy.hstack((x.predicted, y.predicted))
actual = numpy.hstack((x.actual, y.actual))
xprob = getattr(x, "probabilities", None)
yprob = getattr(y, "probabilities", None)
if xprob is None and yprob is None:
prob = None
elif xprob is not None and yprob is not None:
prob = numpy.concatenate((xprob, yprob), axis=1)
else:
raise ValueError()
res = Orange.evaluation.Results()
res.data = x.data
res.domain = x.domain
res.row_indices = row_indices
res.actual = actual
res.predicted = predicted
res.folds = None
if prob is not None:
res.probabilities = prob
if x.models is not None and y.models is not None:
res.models = [xm + ym for xm, ym in zip(x.models, y.models)]
nmodels = predicted.shape[0]
xfailed = getattr(x, "failed", None) or [False] * nmodels
yfailed = getattr(y, "failed", None) or [False] * nmodels
assert len(xfailed) == len(yfailed)
res.failed = [xe or ye for xe, ye in zip(xfailed, yfailed)]
return res
def main(argv=sys.argv):
from PyQt4.QtGui import QApplication
app = QApplication(argv)
argv = app.argv()
if len(argv) > 1:
filename = argv[1]
else:
filename = "iris"
data = Orange.data.Table(filename)
indices = numpy.random.permutation(len(data))
traindata = data[indices[:-20]]
testdata = data[indices[-20:]]
ow = OWLearningCurveB()
ow.show()
ow.raise_()
ow.set_dataset(traindata)
ow.set_testdataset(testdata)
l1 = Orange.classification.NaiveBayesLearner()
l1.name = 'Naive Bayes'
ow.set_learner(l1, 1)
l2 = Orange.classification.LogisticRegressionLearner()
l2.name = 'Logistic Regression'
ow.set_learner(l2, 2)
l4 = Orange.classification.SklTreeLearner()
l4.name = "Decision Tree"
ow.set_learner(l4, 3)
ow.handleNewSignals()
app.exec_()
ow.set_dataset(None)
ow.set_testdataset(None)
ow.set_learner(None, 1)
ow.set_learner(None, 2)
ow.set_learner(None, 3)
ow.handleNewSignals()
return 0
if __name__=="__main__":
sys.exit(main())
| bsd-2-clause |
rizac/gfz-reportgen | gfzreport/sphinxbuild/map/__init__.py | 2 | 43603 | '''
This module implements the function `plotmap` which plots scattered points on a map
retrieved using ArgGIS Server REST API. The function is highly customizable and is basically a
wrapper around the `Basemap` library (for the map background)
plus matplotlib utilities (for plotting points, shapes, labels and legend)
Created on Mar 10, 2016
@author: riccardo
'''
import numpy as np
import re
from itertools import izip, chain
from urllib2 import URLError, HTTPError
import socket
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.basemap import Basemap
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
def parse_margins(obj, parsefunc=lambda margins: [float(val) for val in margins]):
"""Parses obj returning a 4 element numpy array denoting the top, right, bottom and left
values. This function first converts obj to a 4 element list L, and then
calls `parsefunc`, which by default converts all L values into float
:param obj: either None, a number, a list of numbers (allowed lengths: 1 to 4),
a comma/semicolon/spaces separated string (e.g. "4deg 0.0", "1, 1.2", "2km,4deg", "1 ; 2")
:param parsefunc: a function to be applied to obj converted to list. By default, returns
float(v) for any v in L
:return: a 4 element numpy array of floats denoting the top, right, bottom, left values of
the margins. The idea is the same as css margins, as depicted in the table below.
:Examples:
    Denoting the parsing function by `parsefunc`, then:
============= =========================
obj is returns
============= =========================
None [0, 0, 0, 0]
------------- -------------------------
string the list obtained after
splitting string via
regexp where comma,
semicolon and spaces
are valid separators
------------- -------------------------
x or [x] parsefunc([x, x, x, x])
------------- -------------------------
[x, y] parsefunc([x, y ,x, y])
------------- -------------------------
[x, y, z] parsefunc([x, y, z, y])
------------- -------------------------
[x, y, z, t] parsefunc([x, y, z, t])
============= =========================
"""
if obj is None:
margins = [0] * 4
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
# is an iterable not string. Note the if above is py2 py3 compatible
margins = list(obj)
else:
try:
margins = [float(obj)] * 4
except (TypeError, ValueError):
margins = re.compile("(?:\\s*,\\s*|\\s*;\\s*|\\s+)").split(obj)
if len(margins) == 1:
margins *= 4
elif len(margins) == 2:
margins *= 2
elif len(margins) == 3:
margins.append(margins[1])
elif len(margins) != 4:
raise ValueError("unable to parse margins on invalid value '%s'" % obj)
return np.asarray(parsefunc(margins) if hasattr(parsefunc, "__call__") else margins)
# return margins
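# Illustrative examples (not part of the original module); the four returned
# values follow the css-like top/right/bottom/left order:
# parse_margins(None)        -> array([0., 0., 0., 0.])
# parse_margins(1)           -> array([1., 1., 1., 1.])
# parse_margins([1, 2])      -> array([1., 2., 1., 2.])
# parse_margins("1; 2, 3")   -> array([1., 2., 3., 2.])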
def parse_distance(dist, lat_0=None):
"""Returns the distance in degrees. If dist is in km or m, and lat_0 is not None,
returns w2lon, else h2lat. dist None defaults to 0
:param dist: float, int None, string. If string and has a unit, see above
"""
try:
return 0 if dist is None else float(dist)
except ValueError:
if dist[-3:].lower() == 'deg':
return float(dist[:-3])
elif dist[-2:] == 'km':
dst = 1000 * float(dist[:-2])
elif dist[-1:] == 'm':
            dst = float(dist[:-1])
else:
raise
return w2lon(dst, lat_0) if lat_0 is not None else h2lat(dst)
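# Illustrative examples (approximate values, not part of the original module):
# parse_distance(2.5)               -> 2.5   (already degrees)
# parse_distance('1.5deg')          -> 1.5
# parse_distance('111km')           -> roughly 1 degree of latitude (via h2lat)
# parse_distance('2km', lat_0=45.)  -> roughly 0.025 degrees of longitude (via w2lon)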
def get_lon0_lat0(min_lons, min_lats, max_lons, max_lats):
""" Calculates lat_0, lon_0, i.e., the mid point of the bounding box denoted by the
arguments
:param min_lons: the minimum of longitudes
:param min_lats: the maximum of latitudes
:param max_lons: the minimum of longitudes
:param max_lats: the maximum of latitudes
:return: the 2-element tuple denoting the mid point lon_0, lat_0
"""
lat_0 = max_lats / 2. + min_lats / 2.
lon_0 = max_lons / 2. + min_lons / 2.
if lon_0 > 180: # FIXME: necessary?? see self.get_normalized... above
lon_0 -= 360
return lon_0, lat_0
def getbounds(min_lon, min_lat, max_lon, max_lat, margins):
"""Calculates the bounds given the bounding box identified by the arguments and
given optional margins
:param min_lon: the minimum longitude (numeric, scalar)
:param min_lat: the maximum latitude (numeric, scalar)
:param max_lon: the minimum longitude (numeric, scalar)
:param max_lat: the maximum latitude (numeric, scalar)
:param margins: the margins as a css-like string (with units 'deg', 'km' or 'm'), or as
a 1 to 4 element array of numeric values (in that case denoting degrees).
As in css, a 4 element array denotes the [top, right, bottom, left] values.
None defaults to [0, 0, 0, 0].
:return: the 6-element tuple denoting lon_0, lat_0, min_lon, min_lat, max_lon, max_lat.
where min_lon, min_lat, max_lon, max_lat are the new bounds and lon_0 and lat_0 are
their midpoints (x and y, respectively)
"""
def parsefunc(mrgns):
"""parses mrgns as array of strings into array of floats
"""
return parse_distance(mrgns[0]), parse_distance(mrgns[1], max_lat), \
parse_distance(mrgns[2]), parse_distance(mrgns[3], min_lat)
top, right, btm, left = parse_margins(margins, parsefunc)
min_lon, min_lat, max_lon, max_lat = min_lon-left, min_lat-btm, max_lon+right, max_lat+top
if min_lon == max_lon:
min_lon -= 10 # in degrees
max_lon += 10 # in degrees
if min_lat == max_lat:
min_lat -= 10 # in degrees
max_lat += 10 # in degrees
# minima must be within bounds:
min_lat = max(-90, min_lat)
max_lat = min(90, max_lat)
min_lon = max(-180, min_lon)
max_lon = min(180, max_lon)
lon_0, lat_0 = get_lon0_lat0(min_lon, min_lat, max_lon, max_lat)
return lon_0, lat_0, min_lon, min_lat, max_lon, max_lat
# static constant converter (degree to meters and viceversa) for latitudes
DEG2M_LAT = 2 * np.pi * 6371 * 1000 / 360
def lat2h(distance_in_degrees):
"""converts latitude distance from degrees to height in meters
:param distance_in_degrees: a distance (python scalar or numpy array) along the great circle
    expressed in degrees"""
deg2m_lat = DEG2M_LAT # 2 * np.pi * 6371 * 1000 / 360
return distance_in_degrees * deg2m_lat
def h2lat(distance_in_meters):
"""converts latitude distance from height in meters to degrees
    :param distance_in_meters: a distance (python scalar or numpy array) along the great circle
    expressed in meters"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
return distance_in_meters / deg2m_lat
def lon2w(distance_in_degrees, lat_0):
"""converts longitude distance from degrees to width in meters
:param distance_in_degrees: a distance (python scalar or numpy array)
along the lat_0 circle expressed in degrees
    :param lat_0: the latitude (in degrees) of the circle along which
    lon2w(distance_in_degrees) is converted to meters"""
deg2m_lat = DEG2M_LAT
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_degrees * deg2m_lon
def w2lon(distance_in_meters, lat_0):
"""converts longitude distance from width in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array)
along the lat_0 circle expressed in meters
    :param lat_0: the latitude (in degrees) of the circle along which
    w2lon(distance_in_meters) is converted to degrees"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_meters / deg2m_lon
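# Quick sanity sketch for the converters above (approximate, illustrative
# values only): one degree of latitude is ~111195 m everywhere, while one
# degree of longitude shrinks with the cosine of the latitude.
# lat2h(1.0)            -> ~111195 (meters)
# h2lat(111195.0)       -> ~1.0 (degrees)
# lon2w(1.0, lat_0=60)  -> ~55597 (meters, since cos(60 deg) = 0.5)
# w2lon(55597.0, 60)    -> ~1.0 (degrees)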
class MapHandler(object):
"""
Class handling bounds of a map given points (lons and lats)
"""
def __init__(self, lons, lats, map_margins):
"""Initializes a new MapHandler. If figure here is None, you **MUST**
call self.set_fig(fig) to calculate bounds and other stuff
when you have a ready figure"""
self.lons = lons if len(lons) else [0] # FIXME: use numpy arrays!!
self.lats = lats if len(lats) else [0]
self.max_lons, self.min_lons = max(self.lons), min(self.lons)
self.max_lats, self.min_lats = max(self.lats), min(self.lats)
self.lon_0, self.lat_0, self.llcrnrlon, self.llcrnrlat, self.urcrnrlon, self.urcrnrlat = \
getbounds(self.min_lons, self.min_lats, self.max_lons, self.max_lats, map_margins)
def _get_map_dims(self): # , fig_size_in_inches, colorbar=False):
"""Returns the map dimension width, height, in meters"""
max_lons, min_lons = self.urcrnrlon, self.llcrnrlon
max_lats, min_lats = self.urcrnrlat, self.llcrnrlat
height = lat2h(max_lats - min_lats)
width = lon2w(max_lons - min_lons, self.lat_0)
return width, height
def get_parallels(self, max_labels_count=8):
width, height = self._get_map_dims()
lat_0 = self.lat_0
N1 = int(np.ceil(height / max(width, height) * max_labels_count))
parallels = MapHandler._linspace(lat_0 - h2lat(height / 2),
lat_0 + h2lat(height / 2), N1)
return parallels
def get_meridians(self, max_labels_count=8):
width, height = self._get_map_dims()
lon_0 = self.lon_0
lat_0 = self.lat_0
N2 = int(np.ceil(width / max(width, height) * max_labels_count))
meridians = MapHandler._linspace(lon_0 - w2lon(width / 2, lat_0),
lon_0 + w2lon(width / 2, lat_0), N2)
meridians[meridians > 180] -= 360
return meridians
@staticmethod
def _linspace(val1, val2, N):
"""
returns around N 'nice' values between val1 and val2. Copied from obspy.plot_map
"""
dval = val2 - val1
round_pos = int(round(-np.log10(1. * dval / N)))
# Fake negative rounding as not supported by future as of now.
if round_pos < 0:
factor = 10 ** (abs(round_pos))
delta = round(2. * dval / N / factor) * factor / 2
else:
delta = round(2. * dval / N, round_pos) / 2
new_val1 = np.ceil(val1 / delta) * delta
new_val2 = np.floor(val2 / delta) * delta
N = (new_val2 - new_val1) / delta + 1
return np.linspace(new_val1, new_val2, N)
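# Minimal usage sketch (illustrative station coordinates, not from the
# original code base): compute expanded bounds and tick positions for a few
# points with a 0.5 degree margin on every side.
# handler = MapHandler([11.5, 12.2, 13.0], [46.1, 47.3, 45.9], '0.5deg')
# handler.llcrnrlon, handler.llcrnrlat, handler.urcrnrlon, handler.urcrnrlat
# handler.get_parallels(max_labels_count=6)
# handler.get_meridians(max_labels_count=6)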
def _normalize(obj, size=None, dtype=None):
""""Casts" obj to a numpy array of the given optional size and optional dtype, and returns it.
If size is not None, the array must have length size. If not, and has length 1, it will be
resized to the specified size. Otherwise a ValueError is raised
If size is None, no resize will be in place and the array is returend as it is
Note: obj=None will be converted to the array [None], apparently in the current version of numpy
this wouldn't be the default (see argument ndmin=1)
:return an numpy array resulting to the coinversion of obj into array
:Examples:
"""
x = np.array(obj, ndmin=1) if dtype is None else np.array(obj, ndmin=1, dtype=dtype)
if size is None:
return np.array([]) if obj is None else x # if obj is None x is [None], return [] instead
try:
if len(x) == 1:
x = np.resize(x, size)
elif len(x) != size:
raise ValueError("invalid array length: %d. Expected %d" % (len(x), size))
except (ValueError, TypeError) as _err:
raise ValueError(str(_err))
return x
def torgba(html_str):
"""Converts html_str into a tuple of rgba colors all in [0, 1]
Curiously, matplotlib color functions do not provide this functionality for
'#RGBA' color formats
:param html_str: a valid html string in hexadecimal format.
Can have length 4, 7 or 9 such as #F1a, #fa98e3, #fc456a09
:return: a rgba vector, i.e. a 4-element numpy array of values in [0,1] denoting `html_str`
:raise: ValueError if html_str is invalid
"""
if len(html_str) not in (4, 7, 9) or not html_str[0] == '#':
raise ValueError("'%s' invalid html string" % html_str)
elif len(html_str) == 4:
rgb = [html_str[i:i+1]*2 for i in xrange(1, len(html_str))]
else:
rgb = [html_str[i:i+2] for i in xrange(1, len(html_str), 2)]
if len(rgb) == 3:
rgb += ['FF']
return np.true_divide(np.array([int(r, 16) for r in rgb]), 255)
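# Example conversions for torgba (a hedged sketch, not part of the original module):
#   >>> torgba('#FF4400')    # no alpha -> fully opaque, roughly [1.0, 0.267, 0.0, 1.0]
#   >>> torgba('#F40')       # short form expands each hex digit -> same as '#FF4400'
#   >>> torgba('#FF440080')  # explicit alpha in the last two hex digits (~0.5)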
def _shapeargs(lons, lats, labels, sizes, colors, markers, legend_labels):
lons = _normalize(lons, dtype=float) # basically: convert to float array if scalar (size=0)
lats = _normalize(lats, dtype=float) # basically: convert to float array if scalar (size=0)
if len(lons) != len(lats):
raise ValueError('mismatch in lengths: lons (%d) and lats (%d)' % (len(lons), len(lats)))
leng = len(lons)
labels = _normalize(labels, size=leng)
colors = _normalize(colors, size=leng)
markers = _normalize(markers, size=leng)
legend_labels = _normalize(legend_labels, size=leng)
# colors[np.isnan(colors) | (colors <= 0)] = 1.0 # nan colors default to 1 (black?)
sizes = _normalize(sizes, size=leng, dtype=float)
valid_points = np.logical_not(np.isnan(lons) | np.isnan(lats) | (sizes <= 0))
# return all points whose corresponding numeric values are not nan:
return (lons[valid_points],
lats[valid_points],
labels[valid_points],
sizes[valid_points],
colors[valid_points],
markers[valid_points],
legend_labels[valid_points])
# def get_ax_size(ax, fig):
# bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# return bbox.width, bbox.height
def pix2inch(pix, fig):
"""Converts pixel to inches on a given matplotlib figure"""
return pix / fig.dpi
def inch2pix(inch, fig):
"""Converts inches to pixel on a given matplotlib figure"""
return inch * fig.dpi
def _joinargs(key_prefix, kwargs, **already_supplied_args):
'''updates already_supplied_args with kwargs using a given prefix in kwargs to identify
common keys. Used in plotmap for kwargs'''
key_prefix += "_"
len_prefix = len(key_prefix)
already_supplied_args.update({k[len_prefix:]: v
for k, v in kwargs.iteritems() if k.startswith(key_prefix)})
return already_supplied_args
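# Example of how _joinargs extracts prefixed keyword arguments (a hedged sketch,
# not part of the original module; the keys below are made up for illustration):
#   >>> _joinargs('legend', {'legend_title': 'abc', 'labels_color': 'r'}, ncol=1)
#   {'ncol': 1, 'title': 'abc'}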
def _mp_set_custom_props(drawfunc_retval, lines_props, labels_props):
"""Sets custom properties on drawparallels or drawmeridians return function.
drawfunc_retval is a dict of numbers mapped to tuples where the first element is a list of
matplotlib lines, and the second element is a list of matplotlib texts"""
_setprop(chain.from_iterable((lin for lin, lab in drawfunc_retval.itervalues())), lines_props)
_setprop(chain.from_iterable((lab for lin, lab in drawfunc_retval.itervalues())), labels_props)
def _setprop(iterator_of_mp_objects, props):
'''sets the given properties of an iterator of same type matplotlib objects'''
if not props:
return
prp = {}
for obj in iterator_of_mp_objects:
if not prp:
prp = {"set_%s" % name: val for name, val in props.iteritems()
if hasattr(obj, "set_%s" % name)}
for name, val in prp.iteritems():
getattr(obj, name)(val)
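# _setprop usage sketch (hedged, not part of the original module): given an
# iterable of matplotlib Text objects, the call below ends up invoking
# set_color('red') and set_fontsize(8) on each of them:
#   _setprop(iterable_of_texts, {'color': 'red', 'fontsize': 8})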
# values below CAN be None but CANNOT be arrays containing None's
def plotmap(lons,
lats,
labels=None,
legendlabels=None,
markers="o",
colors="#FF4400",
sizes=20,
cmap=None,
fontsize=None,
fontweight='regular',
fontcolor='k',
labels_h_offset=0,
labels_v_offset=0,
mapmargins='0.5deg',
figmargins=2,
arcgis_service='World_Street_Map',
arcgis_xpixels=1500,
arcgis_dpi=96,
urlfail='ignore',
maxmeridians=5,
maxparallels=5,
legend_pos='bottom',
legend_borderaxespad=1.5,
legend_ncol=1,
title=None,
show=False,
**kwargs): # @UnusedVariable
"""
Makes a scatter plot of points on a map background using ArcGIS REST API.
    :param lons: (array-like of length N or scalar) Longitudes of the data points, in degrees
    :param lats: (array-like of length N or scalar) Latitudes of the data points, in degrees
:param labels: (array-like of length N or string. Default: None, no labels) Annotations
(labels) for the individual data points on the map. If non-array (e.g. string), the same value
will be applied to all points
:param legendlabels: (array-like of length N or string. Default: None, no legend)
Annotations (labels) for the legend. You can supply a sparse array where only some points
will be displayed on the legend. All points with no legend label will not show up in the
legend
:param sizes: (array-like of length N or number. Default: 20) Sizes (in points^2) of the
individual points in the scatter plot.
:param markers: (array-like of length N,
`MarkerStyle<http://matplotlib.org/api/markers_api.html#matplotlib.markers.MarkerStyle>`_ or
string. Default: 'o' - circle) The markers (shapes) to be drawn for each point on the map.
See `markers <http://matplotlib.org/api/markers_api.html#module-matplotlib.markers>`_ for
more information on the different styles of markers scatter supports. Marker can be either
an instance of the class or the text shorthand for a particular marker.
:param colors: (array-like of length N,
`matplotlib color <http://matplotlib.org/api/colors_api.html>`_, e.g. string.
Default: "#FF4400")
Colors for the markers (fill color). You can type color transparency by supplying string of 9
elements where the last two characters denote the transparency ('00' fully transparent,
'ff' fully opaque). Note that this is a feature not implemented in `matplotlib` colors, where
transparency is given as the last element of the numeric tuple (r, g, b, a)
:param fontsize: (numeric or None. Default: None) The fontsize for all texts drawn on the
map (labels, axis tick labels, legend). None uses the default figure font size for all. Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontweight: (string or number. Default: 'regular') The font weight for all texts drawn
on the map (labels, axis tick labels, legend). Accepts the values (see
http://matplotlib.org/api/text_api.html#matplotlib.text.Text.set_weight):
```
[a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' |
'bold' | 'heavy' | 'extra bold' | 'black' ]
```
Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontcolor: (`matplotlib color <http://matplotlib.org/api/colors_api.html>`_ or
string. Default: 'k', black) The font color for all texts drawn on the
map (labels, axis tick labels, legend). Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param labels_h_offset: (string, number. Defaults None=0) The horizontal offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels westward, positive values eastward. Useful for not overlapping
markers and labels.
If numeric, it is assumed to be the expressed in degrees. Otherwise, you can supply a string
with a number followed by one of the units 'm', 'km' or 'deg' (e.g., '5km', '0.5deg').
Note that this value affects the
`horizontalalignment` and `multialignment` properties of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_horizontalalignment` or `labels_ha` as optional argument will override
this behaviour (see `kwargs` below)
:param labels_v_offset: (string, number. Defaults None=0) The vertical offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
    labels southward, positive values northward. See notes on `labels_h_offset` for details
Note that this value affects the
`verticalalignment` property of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_verticalalignment` or `labels_va` as optional argument will override
this behaviour (see `kwargs` below)
:param mapmargins: (array-like of 1,2,3,4 elements, numeric or string, or None=0.
Default: '0.5deg').
The map margins, i.e. how much the map has to 'expand/shrink' in any direction, relative
to the bounding box calculated to include all points.
If array-like, it behaves like the css 'margin' property of html: 4 elements will denote
[top, right, bottom, left], two elements will denote [top/bottom, left/right], three
elements [top, right/left, bottom], a single element array (or a single number or a string)
applies the value to all directions.
Finally, elements of the array must be expressed as the arguments `labels_h_offset` or
`labels_v_offset`: numbers denoting degrees or strings with units 'm', 'km', 'deg'. Negative
values will shrink the map.
    If string, the argument will first be split using commas, semicolons or spaces as delimiters
(if no delimiter is found, the string is taken as a single chunk) and converted to an array-like
object.
:param figmargins: (array-like of 1,2,3,4 elements, number or None=0. Default:2) The
figure margins *in font height units* (e.g., 2 means: twice the font height). This argument
behaves exactly as `mapmargins` but expands/shrinks the distances between map and figure
(image) bounds. Useful to include axis tick labels or legend, if they overflow.
Note also that strings
are allowed only if they are parsable to float (e.g. "5,6; -12 1")
:param arcgis_service: (string, default: 'World_Street_Map'). The map image type, or
more technically the service for the map
hosted on ArcGIS server. Other values are 'ESRI_Imagery_World_2D'
(default in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_),
'World_Topo_Map', 'World_Terrain_Base'. For details, see:
http://server.arcgisonline.com/arcgis/rest/services.
:param arcgis_xpixels: (numeric, default: 3000). Requested number of image pixels
in x-direction (default is 400 in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_).
The documentation is quite unclear but this parameter seems to set the zoom of the image. From
this `link <http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage>`_:
A bigger number will ask a bigger image, so the image will have more detail.
So when the zoom is bigger, `xsize` must be bigger to maintain the resolution
:param urlfail: (string, 'raise' or 'ignore'. Default: 'ignore'). Tells what to do if the
ArcGIS requet fails (URLError, no internet connection etcetera). By default, on failure a raw
map with continents contour, and oceans will be plotted (good for
debug). Otherwise, the exception resulting from the web request is raised
:param maxmeridians: (numeric default: 5). The number of maximum meridians to be drawn. Set to
<=0 to hide meridians. Note that also x-axis labels are drawn.
To further manipulate meridians display, use any argument starting with
'mlabels_', 'mlines_' or 'meridians' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `meridians_linewidth=0` or 'mlines_linewidth=0'.
:param maxparallels: (numeric default: 5). The number of maximum parallels to be drawn. Set to
<=0 to hide parallels. Note that also y-axis labels are drawn.
To further manipulate parallels display, use any argument starting with
'plabels_', 'plines_' or 'parallels' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `parallels_linewidth=0` or 'plines_linewidth=0'.
:param legend_pos: (string in ['top'. 'bottom', 'right', 'left'], default='bottom'). The legend
location with respect to the map. It also adjusts the bounding box that the legend will be
anchored to.
For
customizing entirely the legend placement overriding this parameter, provide `legend_loc`
(and optionally `legend_bbox_to_anchor`) in `kwargs` (see below)
:param legend_borderaxespad: (numeric, default 1.5) The pad between the axes and legend border,
in font units
:param legend_ncol: (integer, default=1) The legend number of columns
    :param title: (string or None. Default: None) Title above plot (Note: not tested)
    :param show: (boolean, default: False) Whether to show the figure after plotting or not
    (Note: not tested). Can be used to do further customization of the plot before showing it.
:param fig: (matplotlib figure or None, default: None). Note: deprecated, pass None as
supplying an already existing figure with other axes might break the figure layout
:param kwargs: any kind of additional argument passed to `matplotlib` and `Basemap` functions
or objects.
The name of the argument must be of the form
```
prefix_propertyname=propertyvalue
```
where prefix indicates the function/object to be called with keyword argument:
```
propertyname=propertyvalue
```
Current supported prefixes are (for available property names see links):
Prefix Passes `propertyname` to
============ ==================================================================================
    arcgis       `Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_
used to retrieve the background map using ArgGIS Server REST API. See also
http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage
basemap `Basemap <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap>`_
the object responsible of drawing and managing the map. Note that
`basemap_resolution=h` and `basemap_epsg=4326` by default.
labels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the point labels on the map
legend The `legend <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_.
See the already implemented arguments `legend_borderaxespad`,
`legend_ncol`
legendlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the text labels of the legend
meridians `Basemap.drawmeridians`. For more detailed settings on meridians, see
`mlines` and `mlabels`
parallels `Basemap.drawparallels`. For more detailed settings on parallels, see
`plines` and `plabels`
plines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the parallels
plabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the parallels labels on the y axis
mlines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the meridians
mlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the meridians labels on the x axis
============ ==================================================================================
Examples
--------
- `legend_title='abc'` will call `legend(..., title='abc', ...)`
- `labels_path_effects=[PathEffects.withStroke(linewidth=2, foreground='white')]` will set the
a white contour around each label text
- `meridians_labelstyle="+/-"` will call `Basemap.drawmeridians(..., labelstyle="+/-", ...)`
Notes:
------
The objects referenced by `plines`, `plabels`, `mlines`, `mlabels` and `legendlabels`
cannot be initialized directly with the given properties, which will be set after they are
created assuming that for any property `foo` passed as keyword argument in their constructor
there exist a method `set_foo(...)` (which will be called with the given propertyvalue).
This is most likely always true according to matplotlib api, but we cannot assure it works
100% of the times
"""
lons, lats, labels, sizes, colors, markers, legendlabels =\
_shapeargs(lons, lats, labels, sizes, colors, markers, legendlabels)
    # convert html strings to tuples of rgba values in [0, 1] if the former are in string format,
    # because (maybe too old matplotlib version?) colors in the format '#RGBA' are not supported
    # Also, if cmap is provided, basemap.scatter calls matplotlib.scatter which
    # wants float sequences in case of color map
if colors.dtype.char in ('U', 'S'): # pylint: disable=no-member
colors = np.array([torgba(c) for c in colors])
fig = plt.figure()
map_ax = fig.add_axes([0, 0, 1, 1]) # set axes size the same as figure
# setup handler for managing basemap coordinates and meridians / parallels calculation:
handler = MapHandler(lons, lats, mapmargins)
kwa = _joinargs('basemap', kwargs,
llcrnrlon=handler.llcrnrlon,
llcrnrlat=handler.llcrnrlat,
urcrnrlon=handler.urcrnrlon,
urcrnrlat=handler.urcrnrlat,
epsg='4326', # 4326, # 3395, # 3857,
resolution='i', # 'h',
ax=map_ax)
bmap = Basemap(**kwa)
try:
kwa = _joinargs("arcgis", kwargs, service=arcgis_service, xpixels=arcgis_xpixels,
dpi=arcgis_dpi)
# set the map image via a map service. In case you need the returned values, note that
# This function returns an ImageAxis (or AxisImage, check matplotlib doc)
bmap.arcgisimage(**kwa)
except (URLError, HTTPError, socket.error) as exc:
# failed, maybe there is not internet connection
if urlfail == 'ignore':
# Print a simple map offline
bmap.drawcoastlines()
watercolor = '#4444bb'
bmap.fillcontinents(color='#eebb66', lake_color=watercolor)
bmap.drawmapboundary(fill_color=watercolor)
else:
raise
# draw meridians and parallels. From basemap.drawmeridians / drawparallels doc:
# returns a dictionary whose keys are the meridian values, and
# whose values are tuples containing lists of the
# matplotlib.lines.Line2D and matplotlib.text.Text instances
# associated with each meridian. Deleting an item from the
    # dictionary removes the corresponding meridian from the plot.
if maxparallels > 0:
kwa = _joinargs("parallels", kwargs, linewidth=1, fontsize=fontsize,
labels=[0, 1, 1, 0], fontweight=fontweight)
parallels = handler.get_parallels(maxparallels)
# Old basemap versions have problems with non-integer parallels.
try:
            # Note: the method below returns a list of text objects
            # representing the tick labels
_dict = bmap.drawparallels(parallels, **kwa)
except KeyError:
parallels = sorted(list(set(map(int, parallels))))
_dict = bmap.drawparallels(parallels, **kwa)
# set custom properties:
kwa_lines = _joinargs("plines", kwargs)
kwa_labels = _joinargs("plabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
if maxmeridians > 0:
kwa = _joinargs("meridians", kwargs, linewidth=1, fontsize=fontsize,
labels=[1, 0, 0, 1], fontweight=fontweight)
meridians = handler.get_meridians(maxmeridians)
_dict = bmap.drawmeridians(meridians, **kwa)
# set custom properties:
kwa_lines = _joinargs("mlines", kwargs)
kwa_labels = _joinargs("mlabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
# fig.get_axes()[0].tick_params(direction='out', length=15) # does not work, check basemap
fig.bmap = bmap
# compute the native bmap projection coordinates for events.
# from the docs (this is kind of outdated, however leave here for the moment):
# Calling a Basemap class instance with the arguments lon, lat will
# convert lon/lat (in degrees) to x/y map projection
# coordinates (in meters). If optional keyword ``inverse`` is
# True (default is False), the inverse transformation from x/y
# to lon/lat is performed.
# For cylindrical equidistant projection (``cyl``), this
# does nothing (i.e. x,y == lon,lat).
# For non-cylindrical projections, the inverse transformation
# always returns longitudes between -180 and 180 degrees. For
# cylindrical projections (self.projection == ``cyl``,
# ``cea``, ``mill``, ``gall`` or ``merc``)
# the inverse transformation will return longitudes between
# self.llcrnrlon and self.llcrnrlat.
# Input arguments lon, lat can be either scalar floats,
# sequences, or numpy arrays.
# parse hoffset and voffset and assure they are at least arrays of 1 elements
# (for aligning text labels, see below)
hoffset = np.array(parse_distance(labels_h_offset, lats), copy=False, ndmin=1)
voffset = np.array(parse_distance(labels_v_offset), copy=False, ndmin=1)
lbl_lons = lons + hoffset
lbl_lats = lats + voffset
# convert labels coordinates:
xlbl, ylbl = bmap(lbl_lons, lbl_lats)
# plot point labels
max_points = -1 # negative means: plot all
if max_points < 0 or len(lons) < max_points:
# Set alignments which control also the corner point reference when placing labels
# from (FIXME: add ref?)
# horizontalalignment controls whether the x positional argument for the text indicates
# the left, center or right side of the text bounding box.
# verticalalignment controls whether the y positional argument for the text indicates
# the bottom, center or top side of the text bounding box.
# multialignment, for newline separated strings only, controls whether the different lines
# are left, center or right justified
ha = 'left' if hoffset[0] > 0 else 'right' if hoffset[0] < 0 else 'center'
va = 'bottom' if voffset[0] > 0 else 'top' if voffset[0] < 0 else 'center'
ma = ha
kwa = _joinargs("labels", kwargs, fontweight=fontweight, color=fontcolor,
zorder=100, fontsize=fontsize, horizontalalignment=ha,
verticalalignment=va, multialignment=ma)
for name, xpt, ypt in zip(labels, xlbl, ylbl):
# Check if the point can actually be seen with the current bmap
# projection. The bmap object will set the coordinates to very
# large values if it cannot project a point.
if xpt > 1e25:
continue
map_ax.text(xpt, ypt, name, **kwa)
# plot points
x, y = bmap(lons, lats)
# store handles to points, and relative labels, if any
leg_handles, leg_labels = [], []
# bmap.scatter accepts all array-like args except markers. Avoid several useless loops
# and do only those for distinct markers:
# unique markers (sorted according to their index in markers, not their value):
mrks = markers[np.sort(np.unique(markers, return_index=True)[1])]
for mrk in mrks:
# Note using masks with '==' (numpy==1.11.3):
#
# >>> a = np.array([1,2,3])
# >>> a == 3
# array([False, False, True], dtype=bool) # OK
# >>> a == None
# False # NOT AS EXPECTED!
# >>> np.equal(a, None)
# array([False, False, False], dtype=bool) # OK
#
# (Note also that a == None issues:
# FutureWarning: comparison to `None` will result in an elementwise object
# comparison in the future.)
#
# So the correct way is to write
# mask = np.equal(array, val) if val is None else (a == val)
m_mask = np.equal(markers, mrk) if mrk is None else markers == mrk # see above
__x = x[m_mask]
__y = y[m_mask]
__m = mrk
__s = sizes[m_mask]
__c = colors[m_mask]
__l = legendlabels[m_mask]
# unique legends (sorted according to their index in __l, not their value):
for leg in __l[np.sort(np.unique(__l, return_index=True)[1])]:
l_mask = np.equal(__l, leg) if leg is None else __l == leg # see above
_scatter = bmap.scatter(__x[l_mask],
__y[l_mask],
marker=mrk,
s=__s[l_mask],
c=__c[l_mask],
cmap=cmap,
zorder=10)
if leg:
leg_handles.append(_scatter)
leg_labels.append(leg)
if leg_handles:
# if we provided `legend_loc`, use that:
loc = kwargs.get('legend_loc', None)
bbox_to_anchor = None # defaults in matplotlib legend
# we do have legend to show. Adjust legend reference corner:
if loc is None:
if legend_pos == 'bottom':
loc = 'upper center'
bbox_to_anchor = (0.5, -0.05)
elif legend_pos == 'top':
loc = 'lower center'
bbox_to_anchor = (0.5, 1.05)
elif legend_pos == 'left':
loc = 'center right'
bbox_to_anchor = (-0.05, 0.5)
elif legend_pos == 'right':
loc = 'center left'
bbox_to_anchor = (1, 0.5)
else:
raise ValueError('invalid legend_pos value:"%s"' % legend_pos)
# The plt.legend has the prop argument which sets the font properties:
# family, style, variant, weight, stretch, size, fname. See
# http://matplotlib.org/api/font_manager_api.html#matplotlib.font_manager.FontProperties
# However, that property does not allow to set font color. So we
# use the get_text method of Legend. Note that we pass font size *now* even if
# setting it later works as well (the legend frame is resized accordingly)
kwa = _joinargs("legend", kwargs, scatterpoints=1, ncol=legend_ncol, loc=loc,
bbox_to_anchor=bbox_to_anchor, borderaxespad=legend_borderaxespad,
fontsize=fontsize)
# http://stackoverflow.com/questions/17411940/matplotlib-scatter-plot-legend
leg = map_ax.legend(leg_handles, leg_labels, **kwa)
# set properties supplied via 'legend_'
_setprop(leg.get_texts(), _joinargs("legendlabels", kwargs, color=fontcolor))
# re-position the axes. The REAL map aspect ratio seems to be this:
realratio_h_w = bmap.aspect
fig_w, fig_h = fig.get_size_inches()
figratio_h_w = np.true_divide(fig_h, fig_w)
if figratio_h_w >= realratio_h_w:
# we have margins (blank space) above and below
# thus, we assume:
map_w = fig_w
# and we calculate map_h
map_h = map_w * realratio_h_w
# assume there is the same amount of space above and below:
vpad = (fig_h - map_h) / 2.0
# hpad is zero:
hpad = 0
else:
# we have margins (blank space) left and right
# thus, we assume:
map_h = fig_h
# and consequently:
map_w = map_h / realratio_h_w
        # assume there is the same amount of space to the left and right:
        hpad = (fig_w - map_w) / 2.0
        # vpad is zero:
        vpad = 0
# calculate new fig dimensions EXACTLY as contour of the map
new_fig_w = fig_w - 2 * hpad
new_fig_h = fig_h - 2 * vpad
# now margins:
marginz = parse_margins(figmargins) # margins are in fontheight units. Get font height:
fontsize_inch = 0
if len(np.nonzero(marginz)[0]):
# Calculate the font size in pixels.
# We want to be consistent with matplotlib way of getting fontsize.
# inspecting matplotlib.legend.Legend.draw we end up with:
# 1. Get the renderer
rend = fig.canvas.get_renderer()
# 2. get the fontsize in points. We might use `fontsize` but it might be None and we want
# the default in case. There are several 'defaults' (rcParams['font.size'],
# rcParams["legend.fontsize"])... we don't care for now, use the first. How to get
# rcParams['font.size'] ? Either this: (see at matplotlib.Legend.__init__):
# fontsize_pt = FontProperties(size=fontsize, weight=fontweight).get_size_in_points()
# or simply do:
fontsize_pt = fontsize or rcParams['font.size']
# Now use renderer to convert to pixels:
# For info see matplotlib.text.Text.get_window_extent
fontsize_px = rend.points_to_pixels(fontsize_pt)
# finally inches:
        # fontsize_px is already in pixels: just convert pixels to inches
        fontsize_inch = pix2inch(fontsize_px, fig)
# calculate insets in inches (top right bottom left)
insets_inch = marginz * fontsize_inch
# set to fig dimensions
new_fig_w += insets_inch[1] + insets_inch[3]
new_fig_h += insets_inch[0] + insets_inch[2]
fig.set_size_inches(new_fig_w, new_fig_h, forward=True)
# (forward necessary if fig is in GUI, let's set for safety)
# now the axes which are relative to the figure. Thus first normalize inches:
insets_inch /= [fig_h, fig_w, fig_h, fig_w]
# pos1 = map_ax.get_position() # get the original position
# NOTE: it seems that pos[0], pos[1] indicate the x and y of the LOWER LEFT corner, not
# upper left!
pos2 = [insets_inch[3], insets_inch[2],
1 - (insets_inch[1] + insets_inch[3]),
1 - (insets_inch[0] + insets_inch[2])]
map_ax.set_position(pos2)
if title:
plt.suptitle(title)
if show:
plt.show()
return fig
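# Minimal plotmap usage sketch (hedged, not part of the original module). It
# assumes a working basemap installation; the ArcGIS background additionally
# needs an internet connection, otherwise (urlfail='ignore') a plain coastline
# map is drawn. Coordinates and labels below are made up for illustration:
#   fig = plotmap([12.5, 13.4], [42.0, 41.9],
#                 labels=['sta A', 'sta B'],
#                 legendlabels=['network 1', 'network 2'],
#                 sizes=50, colors='#FF440088', mapmargins='0.5deg')
#   fig.savefig('stations_map.png')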
| gpl-3.0 |
yanlend/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/dates.py | 6 | 52305 | """
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.io/en/stable/>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.io/en/stable/>`_) which allow almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import rcParams
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return str("UTC")
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
# Convert to UTC
tzi = getattr(dt, 'tzinfo', None)
if tzi is not None:
dt = dt.astimezone(UTC)
tzi = UTC
base = float(dt.toordinal())
# If it's sufficiently datetime-like, it will have a `date()` method
cdate = getattr(dt, 'date', lambda: None)()
if cdate is not None:
# Get a datetime object at midnight UTC
midnight_time = datetime.time(0, tzinfo=tzi)
rdt = datetime.datetime.combine(cdate, midnight_time)
# Append the seconds as a fraction of a day
base += (dt - rdt).total_seconds() / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
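# strpdate2num usage sketch (hedged, not part of the original module):
#   >>> strpdate2num('%Y-%m-%d')('2014-01-01')
#   735234.0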
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/pylab_examples/load_converter.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
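# datestr2num usage sketch (hedged, not part of the original module):
#   >>> datestr2num('2014-01-01')
#   735234.0
#   >>> datestr2num(['2014-01-01', '2014-01-02'])
#   array([ 735234.,  735235.])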
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
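# date2num / num2date round-trip sketch (hedged, not part of the original module;
# the timezone of the result depends on rcParams['timezone']):
#   >>> date2num(datetime.datetime(2014, 1, 1))
#   735234.0
#   >>> num2date(735234.0)
#   datetime.datetime(2014, 1, 1, 0, 0, tzinfo=<UTC>)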
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = delta.total_seconds() / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
    if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
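# drange usage sketch (hedged, not part of the original module): a half-open
# daily range [2014-01-01, 2014-01-04):
#   >>> drange(datetime.datetime(2014, 1, 1), datetime.datetime(2014, 1, 4),
#   ...        datetime.timedelta(days=1))
#   array([ 735234.,  735235.,  735236.])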
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
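# Typical DateFormatter usage sketch (hedged, not part of the original module;
# 'ax' stands for any matplotlib Axes plotting dates on its x axis):
#   ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))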
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(np.round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
DAYS_PER_YEAR: rcParams['date.autoformat.year'],
DAYS_PER_MONTH: rcParams['date.autoformat.month'],
1.0: rcParams['date.autoformat.day'],
1. / HOURS_PER_DAY: rcParams['date.autoformat.hour'],
1. / (MINUTES_PER_DAY): rcParams['date.autoformat.minute'],
1. / (SEC_PER_DAY): rcParams['date.autoformat.second'],
1. / (MUSECONDS_PER_DAY): rcParams['date.autoformat.microsecond'],
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> locator = AutoDateLocator()
>>> formatter = AutoDateFormatter(locator)
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: rcParams['date.autoformatter.year'],
DAYS_PER_MONTH: rcParams['date.autoformatter.month'],
1.0: rcParams['date.autoformatter.day'],
1. / HOURS_PER_DAY: rcParams['date.autoformatter.hour'],
1. / (MINUTES_PER_DAY):
rcParams['date.autoformatter.minute'],
1. / (SEC_PER_DAY):
rcParams['date.autoformatter.second'],
1. / (MUSECONDS_PER_DAY):
rcParams['date.autoformatter.microsecond']}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(tdelta.total_seconds())
numMicroseconds = np.floor(tdelta.total_seconds() * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
        # least minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
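# --- Illustrative usage sketch (added for exposition; not part of the original
# module).  It shows the AutoDateLocator/AutoDateFormatter pairing described in
# the docstring above.  The pyplot figure handling is an assumption about how a
# caller would typically use this module; dates.py itself does not need pyplot.
def _demo_auto_date_locator():
    import datetime
    import matplotlib.pyplot as plt

    # One point per week over roughly two years.
    dates = [datetime.datetime(2015, 1, 1) + datetime.timedelta(weeks=i)
             for i in range(104)]
    values = list(range(104))

    fig, ax = plt.subplots()
    ax.plot(dates, values)

    # Let the locator pick a sensible tick frequency for this date range and
    # let the formatter choose a label format that matches it.
    locator = AutoDateLocator(interval_multiples=True)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(AutoDateFormatter(locator))
    return fig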
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if not interval == int(interval) or interval < 1:
raise ValueError("interval must be an integer greater than 0")
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(delta.total_seconds() * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
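# --- Illustrative round-trip sketch (added for exposition; not part of the
# original module).  epoch2num and num2epoch are inverses, which is useful when
# mixing Unix timestamps with Matplotlib date ordinals.
def _demo_epoch_roundtrip():
    import time

    now = time.time()              # seconds since the Unix epoch
    ordinal = epoch2num(now)       # days since 0001 (Matplotlib date ordinal)
    back = num2epoch(ordinal)      # back to seconds since the Unix epoch
    assert abs(back - now) < 1e-3  # equal up to floating-point round-off
    return ordinal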
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
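# --- Illustrative sketch (added for exposition; not part of the original
# module).  The spans below are made-up inputs showing how date_ticker_factory
# chooses a locator for different ranges; reading ``formatter.fmt`` assumes the
# DateFormatter keeps its format string in that attribute.
def _demo_date_ticker_factory():
    for span_in_days in (0.25, 3, 45, 800):
        locator, formatter = date_ticker_factory(span_in_days)
        print('%8.2f days -> %s / %s' % (span_in_days,
                                         locator.__class__.__name__,
                                         formatter.fmt))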
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
| gpl-3.0 |
RachitKansal/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
bhargav/scikit-learn | doc/conf.py | 26 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2015, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
JamesSample/ecosystem_services_impacts | Code/01_es_lu_cc.py | 1 | 21539 | #------------------------------------------------------------------------------
# Name: 01_es_lu_cc.py
# Purpose: Processing for the CREW project on ES, LUC and CC.
#
# Author: James Sample
#
# Created: 14/01/2015
# Copyright: (c) James Sample and JHI, 2015
# License: https://github.com/JamesSample/ecosystem_services_impacts/blob/master/LICENSE
#------------------------------------------------------------------------------
""" Processes the Future Flows (FF) climate data and estimate climate and land
use change effects on Ecosystem Services (ES). Reads workshop outputs and
performs the following steps:
1. For each ES, reads monthly rainfall and ET grids for the months
specified for both baseline and future periods. For the seasons of
interest, calculates the % change in rainfall and ET between
baseline and future.
2. Combines rainfall and runoff percentage changes into a qualitative
grid of change in runoff.
3. Estimates impacts grids for each ES for CC only, LUC only and CC &
LUC combined.
Inputs grids are supplied in HDF5 file format.
"""
import pandas as pd, h5py, numpy as np, matplotlib, matplotlib.pyplot as plt
import os, sys
from mpl_toolkits.axes_grid1 import ImageGrid
from osgeo import gdal, gdalconst, osr
def read_array_from_h5(h5, variable, model, year, month):
""" Read an array from a specified location in an H5 file.
Args:
h5: The open HDF5 file object
variable: The variable of interest ('rainfall' or 'pet')
model: The code for the climate model of interest (string)
year: Year (integer)
month: Month (integer)
Returns:
array
"""
dset_path = r'/ff_data/%s/%s/%s_%s' % (model, variable, variable, year)
data = h5.get(dset_path)[:,:,month-1].astype(float)
# Set NoData to NaN
data[data==-99] = np.nan
# Convert units
data = data/100
return data
def avg_rain_et(h5, st_yr, end_yr, months):
""" Calculate average rainfall and ET grids for the specified years and
months.
Args:
h5: The open HDF5 file object
st_yr: Start year for period of interest (integer)
end_yr: End year for period of interest (integer)
months: List of months of interest (integers)
Returns:
Tuple of arrays (average rainfall, average PET)
"""
# Empty arrays to store rainfall and ET totals
rn_tot = np.zeros((715, 485))
et_tot = np.zeros((715, 485))
# Total number of years to average over
years = end_yr + 1 - st_yr
# Loop over rainfall and ET
for year in range(st_yr, end_yr+1):
for month in months:
# Read rainfall and ET grids
rn = read_array_from_h5(h5, 'rainfall', model, year, month)
et = read_array_from_h5(h5, 'pet', model, year, month)
# Add to totals
rn_tot += rn
et_tot += et
# Average
rn_av = rn_tot/years
et_av = et_tot/years
return (rn_av, et_av)
def plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av):
""" Plot the average rainfall and ET grids. Used for testing.
Args:
base_rn_av: Average rainfall grid for baseline period.
base_et_av: Average PET grid for baseline period.
fut_rn_av: Average rainfall grid for future period.
fut_et_av: Average PET grid for future period.
Returns:
None. Displays maps of each grid using same colour scale.
"""
# Get min and max values from grids
rnmin = min(np.nanmin(base_rn_av), np.nanmin(fut_rn_av))
rnmax = max(np.nanmax(base_rn_av), np.nanmax(fut_rn_av))
etmin = min(np.nanmin(base_et_av), np.nanmin(fut_et_av))
etmax = max(np.nanmax(base_et_av), np.nanmax(fut_et_av))
# Plot
fig = plt.figure()
grid = ImageGrid(fig, 111,
nrows_ncols = (1, 4),
axes_pad=0.5,
cbar_mode='each')
im0 = grid[0].imshow(base_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[0].colorbar(im0)
im1 = grid[1].imshow(fut_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[1].colorbar(im1)
im2 = grid[2].imshow(base_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[2].colorbar(im2)
im3 = grid[3].imshow(fut_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[3].colorbar(im3)
plt.show()
def plot_reclassified_grid(array, out_path, sup_title='Main title',
title='Sub-title'):
""" Plot and save the reclassified grid.
Args:
array: Grid of integers in range -2 to +2
out_path: Output file path (PNG or PDF)
sup_title: Main title for plot (string)
title: Sub-title for plot (string)
Returns:
None. Saves a plot to the specified path.
"""
# Make a color map of fixed colors
cmap = matplotlib.colors.ListedColormap(['Red', 'Orange', 'LimeGreen',
'DeepSkyBlue', 'Blue'])
bounds=[-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
# Create axes for plot (A4 size)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8.3,11.7))
# Plot the array, using the colours specified
img = axes.imshow(array, interpolation='nearest', origin='upper',
cmap=cmap, norm=norm)
# Add labels to plot
plt.title(title)
plt.suptitle(sup_title, fontsize=16, y=0.95)
plt.ylabel('Northing')
plt.xlabel('Easting')
plt.grid(True)
# Reformat the axis labels (mainly change the Y values into northings)
axes.set_yticks([35, 135, 235, 335, 435, 535, 635, 735])
axes.set_yticklabels([1200, 1100, 1000, 900, 800, 700, 600, 500])
axes.set_xticks([100, 200, 300, 400])
# Add axes for the color bar
cax = fig.add_axes([0.2, 0.785, 0.02, 0.10])
# Add the colour bar and set labels
cbar = fig.colorbar(img, cax=cax, cmap=cmap, norm=norm, boundaries=bounds,
ticks=[-2.2,-1.2,-0.2,0.8,1.8])
cbar.set_ticklabels(['Large decrease',
'Small decrease',
'Neutral',
'Small increase',
'Large increase'], update_ticks=True)
# Make the cbar ticks invisible
ticks = cbar.ax.get_yticklines()
for tick in ticks:
plt.setp(tick, alpha=0)
cbar_labels = plt.getp(cbar.ax.axes, 'yticklabels')
plt.setp(cbar_labels, fontsize=10)
# Save fig
plt.savefig(out_path, dpi=300)
## plt.show()
plt.clf()
plt.close()
def reclass_rn_et_grid(array):
""" Take an array of percentage changes and reclassify it according to:
% change | Class
x<=-15 | -2
-15<x<=-5 | -1
-5<x<=5 | 0
5<x<=15 | +1
15<x | +2
Args:
array: Array of percentage changes to be reclassified.
Returns:
Reclassified array
"""
# Create copy of array for reclass values
rc = array.copy()
rc[array<=-15] = -2
rc[(-15<array) & (array<=-5)] = -1
rc[(-5<array) & (array<=5)] = 0
rc[(5<array) & (array<=15)] = 1
rc[15<array] = 2
return rc
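# --- Illustrative sketch (added for exposition; not part of the original
# script).  The array below is made up purely to show how the percentage-change
# bands in reclass_rn_et_grid map onto the -2..+2 classes.
def _demo_reclass_rn_et_grid():
    pct_change = np.array([[-20.0, -10.0, 0.0],
                           [4.9, 10.0, 30.0]])
    classes = reclass_rn_et_grid(pct_change)
    # Expected result:
    # [[-2. -1.  0.]
    #  [ 0.  1.  2.]]
    return classes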
def reclass_ro(matrix_path, rn, et):
""" Generate reclassification matrix for runoff based on reclassified
change grids for rainfall and PET and the runoff reclassification
matrix from the workshop.
Args:
matrix_path: Path to CSV file representing runoff matrix.
rn: Reclassified rainfall grid from reclass_rn_et_grid
et: Reclassified PET grid from reclass_rn_et_grid
Returns:
Array (grid of integers representing change in runoff)
"""
# Read matrix
df = pd.read_csv(matrix_path, index_col=0)
    # Grid of NaNs with correct shape
    ro = rn.copy()*np.nan
    # Loop over indices
for x, y in np.ndindex(ro.shape):
# Get values for change in rainfall and ET
et_ch = et[x, y]
rn_ch = rn[x, y]
# If both are not nan, reclassify
if (np.isfinite(et_ch) and np.isfinite(rn_ch)):
rc_val = df.ix[int(et_ch), str(int(rn_ch))]
ro[x, y] = rc_val
return ro
def reclass_es_ro(es_idx, ro):
""" Reclassify the runoff grid to estimate effects of runoff change on each
ES.
Args:
es_idx: The ID of the ES of interest in data frame ro_df
ro: The runoff change grid from reclass_ro
Returns:
Array (grid of integers representing change in ES)
"""
# Make a copy of the ro grid to update
es = ro.copy()
# Reclassify
for chng in [-2, -1, 0, 1, 2]:
es[ro==chng] = ro_df.ix[es_idx, 'RO_%d' % chng]
return es
def read_ascii(ascii_path,
xmin=0,
xmax=485000,
ymin=520000,
ymax=1235000,
exptd_rows=715,
exptd_cols=485,
exptd_px_wd=1000,
exptd_px_ht=-1000,
exptd_ndv=-9999):
""" Read an ASCII grid file, clip it to the specified bounding box and
return a numpy array.
Args:
xmin: Minimum Easting in OSGB1936 metres.
xmax: Maximum Easting in OSGB1936 metres.
ymin: Minimum Northing in OSGB1936 metres.
ymax: Maximum Northing in OSGB1936 metres.
exptd_rows: No. of rows expected in file.
exptd_cols: No. of columns expected in file.
exptd_px_wd: Cell width.
exptd_px_ht: Cell height.
exptd_ndv: No data value.
Returns:
Array (floats).
"""
# Register drivers
gdal.AllRegister()
# Process the file with GDAL
ds = gdal.Open(ascii_path, gdalconst.GA_ReadOnly)
if ds is None:
print 'Could not open ' + ascii_path
sys.exit(1)
# In order to select the first cell correctly, choose a point just within
# the top left corner of the specified bounding box.
x = xmin + 10
y = ymax - 10
# Dataset properties
geotransform = ds.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
# Calculate number of rows and cols to return
rows = abs(int((ymax-ymin)/pixelHeight))
cols = int((xmax-xmin)/pixelWidth)
# Select starting pixel
xOffset = int((x - originX) / pixelWidth)
yOffset = int((y - originY) / pixelHeight)
band = ds.GetRasterBand(1)
no_data_val = band.GetNoDataValue()
# Simple checking
assert rows == exptd_rows
assert cols == exptd_cols
assert pixelWidth == exptd_px_wd
assert pixelHeight == exptd_px_ht
assert no_data_val == exptd_ndv
# Read the data to an array
data = band.ReadAsArray(xOffset, yOffset, cols, rows)
# Close the dataset
ds = None
return data.astype(float)
def process_land_use_change(lu_mat_path, base, fut, esid, codes_df):
""" Estimate land use change (LUC) only effects for the specified ES.
Args:
lu_mat_path: Excel file containing land use matrices from the workshop.
        base: Baseline land use grid.
        fut: Future land use grid.
esid: ES ID from land use matrices Excel file
codes_df: Land use code look-up table (as data frame)
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use')
# Get row for start of matrix
st_row = (lu_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use', skiprows=st_row,
skip_footer=(120-6-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
    # Grid of NaNs with correct shape
    rc = base.copy()*np.nan
    # Loop over indices
for x, y in np.ndindex(base.shape):
# Get values for baseline and future LU
base_lu = base[x, y]
fut_lu = fut[x, y]
# If both are not nan, reclassify
if (np.isfinite(base_lu) and np.isfinite(fut_lu)):
# Get the base and fut LU as a string
base_str = codes_df.ix[int(base_lu)]['LU_Class']
fut_str = codes_df.ix[int(fut_lu)]['LU_Class']
rc_val = lu_mat.ix[base_str, fut_str]
rc[x, y] = rc_val
return rc
def process_land_use_and_climate_change(lucc_mat_path, lugrid, ccgrid, esid):
""" Estimate combined land use and climate change effects for the specified
ES.
Args:
lucc_mat_path: Excel file containing matrices from the workshop.
lugrid: The grid of land use change effects.
ccgrid: The grid of climate change effects.
esid: ES ID from workshop matrices Excel file.
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU')
# Get row for start of matrix
st_row = (lucc_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU', skiprows=st_row,
skip_footer=(108-5-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
    # Grid of NaNs with correct shape
    rc = lugrid.copy()*np.nan
    # Loop over indices
for x, y in np.ndindex(lugrid.shape):
# Get values for baseline and future LU
lu = lugrid[x, y]
cc = ccgrid[x, y]
# If both are not nan, reclassify
if (np.isfinite(lu) and np.isfinite(cc)):
# Get the base and fut LU as a string
rc_val = lucc_mat.ix[int(lu), int(cc)]
rc[x, y] = rc_val
return rc
def array_to_gtiff(out_path, data_array, ndv=-9999, xmin=0, ymax=1235000,
cell_size=1000):
""" Convert numpy array to 16-bit integer GeoTiff.
Args:
out_path: The .tif file to be created.
data_array: The (integer) data array to save.
ndv: No data value.
xmin: Minimum x (Easting) co-ordinate, in OSGB1936 metres
ymax: Maximim y (Northing) co-ordinate, in OSGB1936 metres
cell_size: Cell size (metres)
Returns:
None. Array is saved to specified path.
"""
# Copy data_array so that it is not modified
data = data_array.copy()
# Convert NaNs to NDV
data[np.isnan(data)] = ndv
# Get array shape
cols = data.shape[1]
rows = data.shape[0]
# Get driver
driver = gdal.GetDriverByName('GTiff') # NB can't directly create ArcInfo ASCII grids in this way
# Create a new raster data source
out_ds = driver.Create(out_path, cols, rows, 1, gdal.GDT_Int16)
# Get spatial ref details
srs = osr.SpatialReference()
srs.ImportFromEPSG(27700) # From EPSG for OSGB36 grid
# Write metadata
out_ds.SetGeoTransform((xmin, cell_size, 0.0, ymax, 0.0, -1*cell_size)) #(xmin, cellsize, 0, ymax, 0, -cellsize)
out_ds.SetProjection(srs.ExportToWkt())
out_band = out_ds.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(data)
# Tidy up
del out_ds, out_band
# #############################################################################
# User input
# Climate data
ff_h5_path = r'D:\WBM_Development_2014\WBM_2014_Monthly_Input_File.h5'
# Runoff matrices
ro_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Runoff_Impacts_Grp1.csv'
ro_matrix_15 = r'D:\Eco_Services_Impacts\Matrices_Development\02_Common_Matrices\Runoff_Matrix_15pct.csv'
# Land use data
base_path = r'D:\Eco_Services_Impacts\Land_Use\baseline_lu_lcm07.txt'
fut_path = r'D:\Eco_Services_Impacts\Land_Use\future_lu_2050.txt'
# Land use matrices
lu_classes_path = r'D:\Eco_Services_Impacts\Land_Use\Land_Use_Classes.csv'
lu_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Land_Use_Matrices_Grp1.xlsx'
# Land use and climate combined matrices
lucc_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Climate_And_Land_Use_Matrices_Grp1.xlsx'
# Output folders
out_pdf_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\PDF'
out_array_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\GeoTiffs'
# Time periods to compare
base_st_yr, base_end_yr = 1961, 1990
fut_st_yr, fut_end_yr = 2041, 2070
# Future Flows models of interest
models = ['afixa', 'afixc', 'afixl', 'afixm', 'afixo', 'afixh',
'afixi', 'afixj', 'afixk', 'afgcx', 'afixq']
# #############################################################################
# Read LU grids
base = read_ascii(base_path)
base[base==-9999] = np.nan
fut = read_ascii(fut_path)
fut[fut==-9999] = np.nan
# Read LU class codes
codes_df = pd.read_csv(lu_classes_path, index_col=0)
# Read the runoff matrices
ro_df = pd.read_csv(ro_path, index_col=0)
# Open H5 file
h5 = h5py.File(ff_h5_path, 'r')
# Iterate over each ES
for idx in ro_df.index:
print '\nProcessing land use change impacts for %s.' % ro_df.ix[idx, 'ES']
# 1. Process land use change only
luc = process_land_use_change(lu_matrices_path, base, fut, idx, codes_df)
# Prepare to save
out_name = 'ES%02d_LUC' % idx
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, luc)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(luc, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(land use change only)' )
# 2. Process climate change only
# Get the relevant months for this ES
months = [int(i) for i in ro_df.ix[idx, 'Key_Months'].split(',')]
# Loop over climate models of interest
for model in models:
print ('Processing climate change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# 2.1. Baseline
base_rn_av, base_et_av = avg_rain_et(h5, base_st_yr, base_end_yr,
months)
# 2.2. Future
fut_rn_av, fut_et_av = avg_rain_et(h5, fut_st_yr, fut_end_yr,
months)
# Plot
# plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av)
# Calculate % change
rn_pct = 100*(fut_rn_av - base_rn_av)/base_rn_av
et_pct = 100*(fut_et_av - base_et_av)/base_et_av
# Reclassify
rn_rc = reclass_rn_et_grid(rn_pct)
et_rc = reclass_rn_et_grid(et_pct)
# plot_reclassified_grid(rn_rc)
# plot_reclassified_grid(et_rc)
# Generate runoff grid
ro = reclass_ro(ro_matrix_15, rn_rc, et_rc)
# # Plot runoff grid
# plot_reclassified_grid(ro,
# sup_title='Change in runoff',
# title='(Model %s; %s)' % (model, months))
# Reclass ro grid to estimate ES impact
es = reclass_es_ro(idx, ro)
# Prepare to save
out_name = 'ES%02d_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, es)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(es, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate model %s only)' % model)
# 3. Process combined land use and climate effects
print ('Processing climate and land use change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# Reclassify to get CC and LUC effects
cc_lu = process_land_use_and_climate_change(lucc_matrices_path, luc,
es, idx)
# Prepare to save
out_name = 'ES%02d_LUC_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, cc_lu)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(cc_lu, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate and land use change together)')
# Close H5 file
h5.close()
print '\nFinished.' | mit |
silky/ProbablyOverthinkingIt | thinkstats2.py | 1 | 69096 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
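# --- Illustrative sketch (added for exposition; not part of the original
# module).  Odds and Probability are inverses, and Probability2 expresses the
# same idea with the two sides of the odds given separately.
def _demo_odds():
    p = 0.75
    o = Odds(p)                            # 3.0, i.e. 3:1 in favor
    assert abs(Probability(o) - p) < 1e-12
    assert abs(Probability2(3, 1) - p) < 1e-12
    return o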
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
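# --- Illustrative sketch (added for exposition; not part of the original
# module).  The xs/ys values are arbitrary; they only show that Lookup
# interpolates x->y and Reverse interpolates y->x over the same mapping.
def _demo_interpolator():
    interp = Interpolator(xs=[0, 10, 20], ys=[0, 100, 400])
    assert interp.Lookup(5) == 50     # halfway between (0, 0) and (10, 100)
    assert interp.Reverse(250) == 15  # 250 is halfway between 100 and 400
    return interp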
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
        if any(isinstance(x, float) and math.isnan(x) for x in self.d.keys()):
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
        Returns: the value with the highest probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
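# --- Illustrative sketch (added for exposition; not part of the original
# module).  Two fair six-sided dice are a stand-in example showing the Pmf
# constructor, Mean, and the overloaded + operator (AddPmf).
def _demo_pmf_dice():
    die = Pmf(range(1, 7))        # uniform over 1..6 after Normalize
    assert abs(die.Mean() - 3.5) < 1e-12
    total = die + die             # distribution of the sum of two rolls
    assert abs(total.Prob(7) - 6.0 / 36) < 1e-12
    return total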
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
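# Illustrative usage sketch: build the joint distribution of two independent
# six-sided dice with MakeJoint, then recover a marginal and a conditional.
# The die values and the _demo_* name are arbitrary examples.
def _demo_joint_usage():
    die = Pmf(range(1, 7))
    joint = MakeJoint(die, die)
    first = joint.Marginal(0, label='first die')
    first_given_second_is_3 = joint.Conditional(0, 1, 3, label='first | second=3')
    return first, first_given_second_is_3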
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
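# Illustrative usage sketch: the distribution of the sum of two fair dice via
# Pmf.__add__, which dispatches to AddPmf. Values and names are examples.
def _demo_dice_sum():
    die = Pmf(range(1, 7))
    total = die + die
    return total.MaximumLikelihood()  # 7, the most likely sum of two dice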
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
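# Illustrative usage sketch: build a Cdf from a small arbitrary sample and
# query a probability, the median, and a central credible interval.
def _demo_cdf_usage():
    cdf = Cdf([1, 2, 2, 3, 5])
    p = cdf.Prob(2)                     # P(X <= 2)
    median = cdf.Value(0.5)             # inverse CDF at 0.5
    low, high = cdf.CredibleInterval(90)
    return p, median, (low, high)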
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
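# Illustrative usage sketch: a minimal Suite subclass for a biased-coin
# problem. Hypotheses are integer percentages of heads and the data items are
# the characters 'H' or 'T'; the class name and values are example choices.
class _CoinSuite(Suite):
    """Maps percent-heads hypotheses to their probabilities."""
    def Likelihood(self, data, hypo):
        x = hypo / 100.0
        return x if data == 'H' else 1 - x
def _demo_coin_update():
    suite = _CoinSuite(range(0, 101))
    for outcome in 'HHTH':
        suite.Update(outcome)
    return suite.MaximumLikelihood()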
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
        in that case the density is evaluated at n locations between
        low and high, including both.
        If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def Sample(self, n):
"""Generates a random sample from the estimated Pdf.
n: size of sample
"""
# NOTE: we have to flatten because resample returns a 2-D
# array for some reason.
return self.kde.resample(n).flatten()
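# Illustrative usage sketch: KDE-smooth a random example sample with
# EstimatedPdf and discretize it into a Pmf; np is the module-level numpy
# import used throughout this file.
def _demo_estimated_pdf():
    sample = np.random.normal(0, 1, 100)
    pdf = EstimatedPdf(sample, label='kde example')
    return pdf.MakePmf(label='kde example')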
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
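# Illustrative usage sketch: a truncated Poisson PMF with an example rate of
# 2.5 events per unit time, and the probability of seeing at least 3 events.
def _demo_poisson_tail():
    pmf = MakePoissonPmf(lam=2.5, high=30)
    return sum(p for k, p in pmf.Items() if k >= 3)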
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
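# Illustrative usage sketch: update a uniform Beta(1, 1) prior with example
# data of 140 heads and 110 tails, then report the posterior mean.
def _demo_beta_update():
    prior = Beta(1, 1, label='coin bias')
    prior.Update((140, 110))
    return prior.Mean()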
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
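# Illustrative usage sketch: a three-category Dirichlet prior updated with
# example counts; the marginal of category 0 is a Beta distribution.
def _demo_dirichlet_update():
    dirichlet = Dirichlet(3)
    dirichlet.Update((3, 2, 1))
    return dirichlet.MarginalBeta(0).Mean()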
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
    xs: sequence of x
    inter: float intercept
    slope: float slope
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
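# Illustrative usage sketch: fit a line to noisy synthetic data and summarize
# the fit; Residuals and CoefDetermination are defined just below, and np is
# the module-level numpy import. FitLine(xs, inter, slope) would give sorted
# points for plotting the fitted line.
def _demo_least_squares():
    xs = np.linspace(0, 10, 50)
    ys = 2.0 + 3.0 * xs + np.random.normal(0, 1, 50)
    inter, slope = LeastSquares(xs, ys)
    res = Residuals(xs, ys, inter, slope)
    return inter, slope, CoefDetermination(ys, res)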
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
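# Illustrative usage sketch: moment-based skewness versus Pearson median
# skewness on a right-skewed example sample.
def _demo_skewness():
    sample = np.random.exponential(1.0, 1000)
    return Skewness(sample), PearsonMedianSkewness(sample)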
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
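# Illustrative usage sketch: a permutation test for a difference in group
# means, written as a HypothesisTest subclass. It expects data as a pair of
# NumPy arrays; the class name and fields are example choices.
# Example use: _DiffMeansPermute((group1, group2)).PValue(iters=1000)
class _DiffMeansPermute(HypothesisTest):
    """Tests a difference in group means by permutation."""
    def TestStatistic(self, data):
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]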
def main():
pass
if __name__ == '__main__':
main()
| mit |
sniemi/SamPy | sandbox/src1/examples/multi_image.py | 1 | 1769 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data =((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].add_observer(im)
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this
# script interactively and be able to change the colormap.
sci(images[0])
show()
| bsd-2-clause |
suranap/qiime | qiime/quality_scores_plot.py | 9 | 6918 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "William.A.Walters@colorado.edu"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from pylab import plot, savefig, xlabel, ylabel, text, \
hist, figure, legend, title, show, xlim, ylim, xticks, yticks,\
scatter, subplot
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
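# Illustrative usage sketch: bin a tiny invented qual-score dict and compute
# per-position statistics; the labels and scores are made-up example values.
def _demo_qual_stats():
    qual_scores = {'seq1': [30, 28, 20], 'seq2': [31, 22]}
    qual_bins = bin_qual_scores(qual_scores)
    return get_qual_stats(qual_bins, score_min=25)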
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
acceptable. Used to generate a dotted line on the graph for easy assay
of the poor scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
to generate dotted line on histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
| gpl-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/numpy/core/function_base.py | 23 | 6891 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y = y * delta
else:
# One might be tempted to use faster, in-place multiplication here,
# but this prevents step from overriding what class is produced,
# and thus prevents, e.g., use of Quantities; see gh-7142.
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| mit |
RomainBrault/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 43 | 10272 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/tseries/test_frequencies.py | 1 | 29684 | from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import frequencies as libfrequencies, resolution
from pandas._libs.tslibs.ccalendar import MONTHS
from pandas._libs.tslibs.frequencies import (
INVALID_FREQ_ERR_MSG, FreqGroup, _period_code_map, get_freq, get_freq_code)
import pandas.compat as compat
from pandas.compat import is_platform_windows, range
from pandas import (
DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range,
period_range)
from pandas.core.tools.datetimes import to_datetime
import pandas.util.testing as tm
import pandas.tseries.frequencies as frequencies
import pandas.tseries.offsets as offsets
class TestToOffset(object):
def test_to_offset_multiple(self):
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert (result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert (result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert (result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert (result == expected)
freqstr = '2h 20.5min'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(8430)
assert (result == expected)
freqstr = '1.5min'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(90)
assert (result == expected)
freqstr = '0.5S'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(500)
assert (result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert (result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert (result == expected)
freqstr = '1s0.25ms'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(1000250)
assert (result == expected)
freqstr = '1s0.25L'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(1000250)
assert (result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert (result == expected)
freqstr = '2SM'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthEnd(2)
assert (result == expected)
freqstr = '2SM-16'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthEnd(2, day_of_month=16)
assert (result == expected)
freqstr = '2SMS-14'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthBegin(2, day_of_month=14)
assert (result == expected)
freqstr = '2SMS-15'
result = frequencies.to_offset(freqstr)
expected = offsets.SemiMonthBegin(2)
assert (result == expected)
# malformed
with pytest.raises(ValueError, match='Invalid frequency: 2h20m'):
frequencies.to_offset('2h20m')
def test_to_offset_negative(self):
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert (result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert (result.n == -310)
freqstr = '-2SM'
result = frequencies.to_offset(freqstr)
assert (result.n == -2)
freqstr = '-1SMS'
result = frequencies.to_offset(freqstr)
assert (result.n == -1)
def test_to_offset_invalid(self):
# GH 13930
with pytest.raises(ValueError, match='Invalid frequency: U1'):
frequencies.to_offset('U1')
with pytest.raises(ValueError, match='Invalid frequency: -U'):
frequencies.to_offset('-U')
with pytest.raises(ValueError, match='Invalid frequency: 3U1'):
frequencies.to_offset('3U1')
with pytest.raises(ValueError, match='Invalid frequency: -2-3U'):
frequencies.to_offset('-2-3U')
with pytest.raises(ValueError, match='Invalid frequency: -2D:3H'):
frequencies.to_offset('-2D:3H')
with pytest.raises(ValueError, match='Invalid frequency: 1.5.0S'):
frequencies.to_offset('1.5.0S')
# split offsets with spaces are valid
assert frequencies.to_offset('2D 3H') == offsets.Hour(51)
assert frequencies.to_offset('2 D3 H') == offsets.Hour(51)
assert frequencies.to_offset('2 D 3 H') == offsets.Hour(51)
assert frequencies.to_offset(' 2 D 3 H ') == offsets.Hour(51)
assert frequencies.to_offset(' H ') == offsets.Hour()
assert frequencies.to_offset(' 3 H ') == offsets.Hour(3)
# special cases
assert frequencies.to_offset('2SMS-15') == offsets.SemiMonthBegin(2)
with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15-15'):
frequencies.to_offset('2SMS-15-15')
with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15D'):
frequencies.to_offset('2SMS-15D')
def test_to_offset_leading_zero(self):
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert (result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert (result.n == -194)
def test_to_offset_leading_plus(self):
freqstr = '+1d'
result = frequencies.to_offset(freqstr)
assert (result.n == 1)
freqstr = '+2h30min'
result = frequencies.to_offset(freqstr)
assert (result.n == 150)
for bad_freq in ['+-1d', '-+1h', '+1', '-7', '+d', '-m']:
with pytest.raises(ValueError, match='Invalid frequency:'):
frequencies.to_offset(bad_freq)
def test_to_offset_pd_timedelta(self):
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert (expected == result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert (expected == result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert (expected == result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert (expected == result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert (expected == result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert (result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert (expected == result)
td = Timedelta(microseconds=0)
pytest.raises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts(self):
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert (result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert (result1 == expected)
assert (result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert (result1 == expected)
result1 = frequencies.to_offset('SM')
result2 = frequencies.to_offset('SM-15')
expected = offsets.SemiMonthEnd(day_of_month=15)
assert (result1 == expected)
assert (result2 == expected)
result = frequencies.to_offset('SM-1')
expected = offsets.SemiMonthEnd(day_of_month=1)
assert (result == expected)
result = frequencies.to_offset('SM-27')
expected = offsets.SemiMonthEnd(day_of_month=27)
assert (result == expected)
result = frequencies.to_offset('SMS-2')
expected = offsets.SemiMonthBegin(day_of_month=2)
assert (result == expected)
result = frequencies.to_offset('SMS-27')
expected = offsets.SemiMonthBegin(day_of_month=27)
assert (result == expected)
# ensure invalid cases fail as expected
invalid_anchors = ['SM-0', 'SM-28', 'SM-29',
'SM-FOO', 'BSM', 'SM--1',
'SMS-1', 'SMS-28', 'SMS-30',
                           'SMS-BAR', 'SMS-BYR', 'BSMS',
'SMS--2']
for invalid_anchor in invalid_anchors:
with pytest.raises(ValueError, match='Invalid frequency: '):
frequencies.to_offset(invalid_anchor)
def test_ms_vs_MS():
left = frequencies.get_offset('ms')
right = frequencies.get_offset('MS')
assert left == offsets.Milli()
assert right == offsets.MonthBegin()
def test_rule_aliases():
rule = frequencies.to_offset('10us')
assert rule == offsets.Micro(10)
class TestFrequencyCode(object):
def test_freq_code(self):
assert get_freq('A') == 1000
assert get_freq('3A') == 1000
assert get_freq('-1A') == 1000
assert get_freq('Y') == 1000
assert get_freq('3Y') == 1000
assert get_freq('-1Y') == 1000
assert get_freq('W') == 4000
assert get_freq('W-MON') == 4001
assert get_freq('W-FRI') == 4005
for freqstr, code in compat.iteritems(_period_code_map):
result = get_freq(freqstr)
assert result == code
result = resolution.get_freq_group(freqstr)
assert result == code // 1000 * 1000
result = resolution.get_freq_group(code)
assert result == code // 1000 * 1000
def test_freq_group(self):
assert resolution.get_freq_group('A') == 1000
assert resolution.get_freq_group('3A') == 1000
assert resolution.get_freq_group('-1A') == 1000
assert resolution.get_freq_group('A-JAN') == 1000
assert resolution.get_freq_group('A-MAY') == 1000
assert resolution.get_freq_group('Y') == 1000
assert resolution.get_freq_group('3Y') == 1000
assert resolution.get_freq_group('-1Y') == 1000
assert resolution.get_freq_group('Y-JAN') == 1000
assert resolution.get_freq_group('Y-MAY') == 1000
assert resolution.get_freq_group(offsets.YearEnd()) == 1000
assert resolution.get_freq_group(offsets.YearEnd(month=1)) == 1000
assert resolution.get_freq_group(offsets.YearEnd(month=5)) == 1000
assert resolution.get_freq_group('W') == 4000
assert resolution.get_freq_group('W-MON') == 4000
assert resolution.get_freq_group('W-FRI') == 4000
assert resolution.get_freq_group(offsets.Week()) == 4000
assert resolution.get_freq_group(offsets.Week(weekday=1)) == 4000
assert resolution.get_freq_group(offsets.Week(weekday=5)) == 4000
def test_get_to_timestamp_base(self):
tsb = libfrequencies.get_to_timestamp_base
assert (tsb(get_freq_code('D')[0]) ==
get_freq_code('D')[0])
assert (tsb(get_freq_code('W')[0]) ==
get_freq_code('D')[0])
assert (tsb(get_freq_code('M')[0]) ==
get_freq_code('D')[0])
assert (tsb(get_freq_code('S')[0]) ==
get_freq_code('S')[0])
assert (tsb(get_freq_code('T')[0]) ==
get_freq_code('S')[0])
assert (tsb(get_freq_code('H')[0]) ==
get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = resolution.Resolution
assert Reso.get_str_from_freq('A') == 'year'
assert Reso.get_str_from_freq('Q') == 'quarter'
assert Reso.get_str_from_freq('M') == 'month'
assert Reso.get_str_from_freq('D') == 'day'
assert Reso.get_str_from_freq('H') == 'hour'
assert Reso.get_str_from_freq('T') == 'minute'
assert Reso.get_str_from_freq('S') == 'second'
assert Reso.get_str_from_freq('L') == 'millisecond'
assert Reso.get_str_from_freq('U') == 'microsecond'
assert Reso.get_str_from_freq('N') == 'nanosecond'
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
assert freq == result
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
assert freq == result
def test_resolution_bumping(self):
# see gh-14378
Reso = resolution.Resolution
assert Reso.get_stride_from_decimal(1.5, 'T') == (90, 'S')
assert Reso.get_stride_from_decimal(62.4, 'T') == (3744, 'S')
assert Reso.get_stride_from_decimal(1.04, 'H') == (3744, 'S')
assert Reso.get_stride_from_decimal(1, 'D') == (1, 'D')
assert (Reso.get_stride_from_decimal(0.342931, 'H') ==
(1234551600, 'U'))
assert Reso.get_stride_from_decimal(1.2345, 'D') == (106660800, 'L')
with pytest.raises(ValueError):
Reso.get_stride_from_decimal(0.5, 'N')
        # too much precision in the input can prevent an exact conversion
with pytest.raises(ValueError):
Reso.get_stride_from_decimal(0.3429324798798269273987982, 'H')
def test_get_freq_code(self):
# frequency str
assert (get_freq_code('A') ==
(get_freq('A'), 1))
assert (get_freq_code('3D') ==
(get_freq('D'), 3))
assert (get_freq_code('-2M') ==
(get_freq('M'), -2))
# tuple
assert (get_freq_code(('D', 1)) ==
(get_freq('D'), 1))
assert (get_freq_code(('A', 3)) ==
(get_freq('A'), 3))
assert (get_freq_code(('M', -2)) ==
(get_freq('M'), -2))
# numeric tuple
assert get_freq_code((1000, 1)) == (1000, 1)
# offsets
assert (get_freq_code(offsets.Day()) ==
(get_freq('D'), 1))
assert (get_freq_code(offsets.Day(3)) ==
(get_freq('D'), 3))
assert (get_freq_code(offsets.Day(-2)) ==
(get_freq('D'), -2))
assert (get_freq_code(offsets.MonthEnd()) ==
(get_freq('M'), 1))
assert (get_freq_code(offsets.MonthEnd(3)) ==
(get_freq('M'), 3))
assert (get_freq_code(offsets.MonthEnd(-2)) ==
(get_freq('M'), -2))
assert (get_freq_code(offsets.Week()) ==
(get_freq('W'), 1))
assert (get_freq_code(offsets.Week(3)) ==
(get_freq('W'), 3))
assert (get_freq_code(offsets.Week(-2)) ==
(get_freq('W'), -2))
# Monday is weekday=0
assert (get_freq_code(offsets.Week(weekday=1)) ==
(get_freq('W-TUE'), 1))
assert (get_freq_code(offsets.Week(3, weekday=0)) ==
(get_freq('W-MON'), 3))
assert (get_freq_code(offsets.Week(-2, weekday=4)) ==
(get_freq('W-FRI'), -2))
def test_frequency_misc(self):
assert (resolution.get_freq_group('T') ==
FreqGroup.FR_MIN)
code, stride = get_freq_code(offsets.Hour())
assert code == FreqGroup.FR_HR
code, stride = get_freq_code((5, 'T'))
assert code == FreqGroup.FR_MIN
assert stride == 5
offset = offsets.Hour()
result = frequencies.to_offset(offset)
assert result == offset
result = frequencies.to_offset((5, 'T'))
expected = offsets.Minute(5)
assert result == expected
with pytest.raises(ValueError, match='Invalid frequency'):
get_freq_code((5, 'baz'))
with pytest.raises(ValueError, match='Invalid frequency'):
frequencies.to_offset('100foo')
with pytest.raises(ValueError, match='Could not evaluate'):
frequencies.to_offset(('', ''))
_dti = DatetimeIndex
class TestFrequencyInference(object):
def test_raise_if_period_index(self):
index = period_range(start="1/1/1990", periods=20, freq="M")
pytest.raises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
pytest.raises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['01/01/1999', '1/4/1999', '1/5/1999'])
assert frequencies.infer_freq(index) == 'B'
def test_business_daily_look_alike(self):
# GH 16624, do not infer 'B' when 'weekend' (2-day gap) in wrong place
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
assert frequencies.infer_freq(index) is None
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
assert frequencies.infer_freq(index) == 'D'
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
assert frequencies.infer_freq(dates) == 'D'
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
assert frequencies.infer_freq(index) == exp_freq
index = _dti([b + base_delta * 7] + [b + base_delta * j for j in range(
3)])
assert frequencies.infer_freq(index) is None
index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta *
7])
assert frequencies.infer_freq(index) is None
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
pytest.raises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
        # All of these dates are on the same day of the week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29",
"2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
assert rng.inferred_freq == 'M'
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
assert rng.inferred_freq == 'A-JAN'
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
assert is_dec_range or is_nov_range or is_oct_range
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
assert frequencies.infer_freq(index) == gen.freqstr
else:
inf_freq = frequencies.infer_freq(index)
is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in (
'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR')
is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in (
'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB')
is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in (
'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')
assert is_dec_range or is_nov_range or is_oct_range
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-DEC'
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-NOV'
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').astype(object))
assert rng.inferred_freq == 'Q-OCT'
def test_infer_freq_tz(self):
freqs = {'AS-JAN':
['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT':
['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT':
['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00',
'2012-01-01 00:00', '2012-01-01 01:00']}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
assert idx.inferred_freq == expected
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], # Fall DST
['2014-03-08', '2014-03-11'], # Spring DST
['2014-01-01', '2014-01-03']] # Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U',
'3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[
1], freq=freq, tz=tz)
assert idx.inferred_freq == freq
index = date_range("2013-11-03", periods=5,
freq="3H").tz_localize("America/Chicago")
assert index.inferred_freq is None
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(
['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
assert idx.inferred_freq == 'H'
idx = DatetimeIndex(
['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00', '2014-07-02 09:00',
'2014-07-02 10:00', '2014-07-02 11:00'])
assert idx.inferred_freq == 'BH'
idx = DatetimeIndex(
['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00'])
assert idx.inferred_freq == 'BH'
idx = DatetimeIndex(
['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00',
'2014-07-08 11:00', '2014-07-08 12:00', '2014-07-08 13:00',
'2014-07-08 14:00', '2014-07-08 15:00', '2014-07-08 16:00'])
assert idx.inferred_freq == 'BH'
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
assert rng.inferred_freq == '-1A-JAN'
def test_non_datetimeindex2(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
assert result == rng.inferred_freq
def test_invalid_index_types(self):
# test all index types
for i in [tm.makeIntIndex(10), tm.makeFloatIndex(10),
tm.makePeriodIndex(10)]:
pytest.raises(TypeError, lambda: frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]:
pytest.raises(ValueError, lambda: frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03',
'2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03',
'2004-04']))
assert result == expected
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [Series(np.arange(10)), Series(np.arange(10.))]:
pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
# a non-convertible string
pytest.raises(ValueError, lambda: frequencies.infer_freq(
Series(['foo', 'bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013', periods=10, freq=freq))
pytest.raises(TypeError, lambda: frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101', periods=10, freq=freq))
inferred = frequencies.infer_freq(s)
assert inferred == freq
s = Series(date_range('20130101', '20130110'))
inferred = frequencies.infer_freq(s)
assert inferred == 'D'
def test_legacy_offset_warnings(self):
freqs = ['WEEKDAY', 'EOM', 'W@MON', 'W@TUE', 'W@WED', 'W@THU',
'W@FRI', 'W@SAT', 'W@SUN', 'Q@JAN', 'Q@FEB', 'Q@MAR',
'A@JAN', 'A@FEB', 'A@MAR', 'A@APR', 'A@MAY', 'A@JUN',
'A@JUL', 'A@AUG', 'A@SEP', 'A@OCT', 'A@NOV', 'A@DEC',
'Y@JAN', 'WOM@1MON', 'WOM@2MON', 'WOM@3MON',
'WOM@4MON', 'WOM@1TUE', 'WOM@2TUE', 'WOM@3TUE',
'WOM@4TUE', 'WOM@1WED', 'WOM@2WED', 'WOM@3WED',
'WOM@4WED', 'WOM@1THU', 'WOM@2THU', 'WOM@3THU',
'WOM@4THU', 'WOM@1FRI', 'WOM@2FRI', 'WOM@3FRI',
'WOM@4FRI']
msg = INVALID_FREQ_ERR_MSG
for freq in freqs:
with pytest.raises(ValueError, match=msg):
frequencies.get_offset(freq)
with pytest.raises(ValueError, match=msg):
date_range('2011-01-01', periods=5, freq=freq)
| bsd-3-clause |
jcrudy/sklearntools | sklearntools/test/test_transformers.py | 1 | 3613 | from sklearntools.transformers import Constant, VariableTransformer, Identity,\
Censor, NanMap, Log
import numpy as np
import pandas
from numpy.testing.utils import assert_array_almost_equal
from sklearn.datasets.base import load_boston
from pyearth.earth import Earth
from sklearntools.calibration import ResponseTransformingEstimator
from sklearn.metrics.regression import r2_score
# from sklearntools.sym.printers import exec_module, model_to_code
def test_with_response_transformation():
X, y = load_boston(return_X_y=True)
log_y = np.log(y)
X = pandas.DataFrame(X, columns=['x%d' % i for i in range(X.shape[1])])
y = pandas.DataFrame(y, columns=['y'])
transformer = VariableTransformer(dict(y=Log(Identity('y'))))
model = ResponseTransformingEstimator(Earth(), transformer)
model.fit(X, y)
log_y_pred = model.predict(X)
assert r2_score(log_y, log_y_pred) > .8
assert r2_score(y, log_y_pred) < .1
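    # Editorial note: the wrapped Earth model is fit against log(y) via the Log
    # transformer, so its predictions are on the log scale -- hence the high r2
    # against log_y and the near-zero r2 against the untransformed y above.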
def test_transformation_system():
np.random.seed(1)
x = Identity('x')
y = Identity('y')
z = Identity('z')
d = (x + y) / z
transformer = VariableTransformer(dict(d=d), exclusive=True)
X = pandas.DataFrame(np.random.normal(size=(10,3)), columns=['x','y','z'])
transformer.fit(X)
assert_array_almost_equal(transformer.transform(X)['d'], (X['x'] + X['y']) / X['z'])
# numpy_test_module = exec_module('numpy_test_module', model_to_code(transformer, 'numpy', 'transform', 'test_model'))
# assert_array_almost_equal(pandas.DataFrame(dict(zip(['x', 'y', 'z', 'd'], numpy_test_module.test_model(**X))))[['x', 'y', 'z', 'd']], transformer.transform(X))
def test_rate():
np.random.seed(1)
X = pandas.DataFrame({'count': np.random.poisson(1., size=100), 'duration': np.random.poisson(5., size=100)})
rate = Censor(Identity('count') / Identity('duration'), Identity('duration') < 4)
transformer = VariableTransformer(dict(rate=rate))
transformer.fit(X)
target = X['count'] / X['duration']
target[X['duration'] < 4] = np.nan
assert_array_almost_equal(transformer.transform(X)['rate'], target)
# numpy_test_module = exec_module('numpy_test_module', model_to_code(transformer, 'numpy', 'transform', 'test_model'))
# assert_array_almost_equal(pandas.DataFrame(dict(zip(['count', 'duration', 'rate'], numpy_test_module.test_model(**X))))[['count', 'duration', 'rate']], transformer.transform(X))
def test_uncensor():
X = pandas.DataFrame(np.random.normal(size=(10,3)), columns=['x','y','z'])
X.loc[1,'x'] = np.nan
X.loc[2, 'y'] = np.nan
transformer = NanMap({'x': 100.})
transformer.fit(X)
X_ = transformer.transform(X)
assert_array_almost_equal(X['y'], X_['y'])
assert not (X['x'] == X_['x']).all()
fix = X['x'].copy()
fix[1] = 100.
assert_array_almost_equal(fix, X_['x'])
def test_non_strict():
X = pandas.DataFrame(np.random.normal(size=(10,3)), columns=['x','y','z'])
X.loc[1,'x'] = np.nan
X.loc[2, 'y'] = np.nan
transformer = NanMap({'x': 100.,
'w': 0.})
transformer.fit(X)
X_ = transformer.transform(X)
assert_array_almost_equal(X['y'], X_['y'])
assert not (X['x'] == X_['x']).all()
fix = X['x'].copy()
fix[1] = 100.
assert_array_almost_equal(fix, X_['x'])
if __name__ == '__main__':
import sys
import nose
    # This code will run the tests in this file.
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
| bsd-3-clause |
skdaccess/skdaccess | skdaccess/geo/srtm/cache/data_fetcher.py | 2 | 10677 | # The MIT License (MIT)
# Copyright (c) 2016 Massachusetts Institute of Technology
#
# Authors: Cody Rude, Guillaume Rongier
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherCache, ImageWrapper
from skdaccess.utilities.support import convertToStr
from skdaccess.utilities.image_util import AffineGlobalCoords, convertBinCentersToEdges
# 3rd party imports
import pandas as pd
import numpy as np
import gdal
from pkg_resources import resource_filename
# Standard library imports
from collections import OrderedDict
from calendar import monthrange
from zipfile import ZipFile
import os
class DataFetcher(DataFetcherCache):
''' DataFetcher for retrieving data from the Shuttle Radar Topography Mission '''
def __init__(self, lat_tile_start, lat_tile_end, lon_tile_start, lon_tile_end,
username, password, arcsecond_sampling = 1, mask_water = True,
store_geolocation_grids=False):
'''
Initialize Data Fetcher
@param lat_tile_start: Latitude of the southwest corner of the starting tile
        @param lat_tile_end: Latitude of the southwest corner of the last tile
@param lon_tile_start: Longitude of the southwest corner of the starting tile
@param lon_tile_end: Longitude of the southwest corner of the last tile
@param username: NASA Earth Data username
@param password: NASA Earth Data Password
@param arcsecond_sampling: Sample spacing of the SRTM data, either 1 arc-
second or 3 arc-seconds
@param mask_water: True if the water bodies should be masked, false otherwise
@param store_geolocation_grids: Store grids of latitude and longitude in the metadata
'''
assert arcsecond_sampling == 1 or arcsecond_sampling == 3, "Sampling should be 1 or 3 arc-seconds"
self.lat_tile_start = lat_tile_start
self.lat_tile_end = lat_tile_end
self.lon_tile_start = lon_tile_start
self.lon_tile_end = lon_tile_end
self.username = username
self.password = password
self.arcsecond_sampling = arcsecond_sampling
self.mask_water = mask_water
self.store_geolocation_grids = store_geolocation_grids
self._missing_data_projection = '\n'.join([
'GEOGCS["WGS 84",',
' DATUM["WGS_1984",',
' SPHEROID["WGS 84",6378137,298.257223563,',
' AUTHORITY["EPSG","7030"]],',
' AUTHORITY["EPSG","6326"]],',
' PRIMEM["Greenwich",0,',
' AUTHORITY["EPSG","8901"]],',
' UNIT["degree",0.0174532925199433,',
' AUTHORITY["EPSG","9122"]],',
' AUTHORITY["EPSG","4326"]]'
])
super(DataFetcher, self).__init__()
def output(self):
'''
Generate SRTM data wrapper
@return SRTM Image Wrapper
'''
lat_tile_array = np.arange(self.lat_tile_start, self.lat_tile_end+1)
lon_tile_array = np.arange(self.lon_tile_start, self.lon_tile_end+1)
lat_grid,lon_grid = np.meshgrid(lat_tile_array, lon_tile_array)
lat_grid = lat_grid.ravel()
lon_grid = lon_grid.ravel()
filename_root = '.SRTMGL1.'
base_url = 'https://e4ftl01.cr.usgs.gov/MEASURES/'
folder_root = 'SRTMGL1.003/2000.02.11/'
if self.arcsecond_sampling == 3:
filename_root = '.SRTMGL3.'
folder_root = 'SRTMGL3.003/2000.02.11/'
base_url += folder_root
filename_list = []
for lat, lon in zip(lat_grid, lon_grid):
if lat < 0:
lat_label = 'S'
lat = np.abs(lat)
else:
lat_label = 'N'
if lon < 0:
lon_label = 'W'
lon = np.abs(lon)
else:
lon_label = 'E'
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'hgt.zip')
if self.mask_water == True:
filename_list.append(lat_label + convertToStr(lat, 2) + lon_label + convertToStr(lon, 3) + filename_root + 'num.zip')
# Read in list of available data
srtm_list_filename = 'srtm_gl1.txt'
if self.arcsecond_sampling == 3:
srtm_list_filename = 'srtm_gl3.txt'
srtm_support_filename = resource_filename('skdaccess', os.path.join('support',srtm_list_filename))
available_file_list = open(srtm_support_filename).readlines()
available_file_list = [filename.strip() for filename in available_file_list]
requested_files = pd.DataFrame({'Filename' : filename_list})
requested_files['Valid'] = [ '.'.join(filename.split('.')[0:-2]) in available_file_list for filename in filename_list ]
valid_filename_list = requested_files.loc[ requested_files['Valid']==True, 'Filename'].tolist()
url_list = [base_url + filename for filename in valid_filename_list]
downloaded_file_list = self.cacheData('srtm', url_list, self.username, self.password,
'https://urs.earthdata.nasa.gov')
requested_files.loc[ requested_files['Valid']==True, 'Full Path'] = downloaded_file_list
def getCoordinates(filename):
'''
            Determine the longitude and latitude of the lower-left corner of the input SRTM tile
            @param filename: Input SRTM filename
@return Latitude of southwest corner, Longitude of southwest corner
'''
lat_start = int(filename[1:3])
if filename[0] == 'S':
lat_start *= -1
lon_start = int(filename[4:7])
if filename[3] == 'W':
lon_start *= -1
return lat_start, lon_start
data_dict = OrderedDict()
metadata_dict = OrderedDict()
array_shape = (3601,3601)
if self.arcsecond_sampling == 3:
array_shape = (1201,1201)
file_slice = slice(None)
water_value = 0
if self.mask_water == True:
file_slice = slice(0, -1, 2)
water_value = np.nan
for i in requested_files.index[file_slice]:
hgt_full_path = requested_files.at[i, 'Full Path']
hgt_filename = requested_files.at[i, 'Filename']
label = hgt_filename[:7]
lat_start, lon_start = getCoordinates(hgt_filename)
metadata_dict[label] = OrderedDict()
x_res = 1.0 / (array_shape[0]-1)
y_res = 1.0 / (array_shape[1]-1)
extents = [
lon_start - x_res / 2,
lon_start + 1 + x_res / 2,
lat_start - y_res / 2,
lat_start + 1 + y_res / 2
]
if requested_files.at[i, 'Valid']:
masked_dem_data = np.ones(array_shape)
if self.mask_water == True and requested_files.at[i + 1, 'Valid']:
num_full_path = requested_files.at[i + 1, 'Full Path']
                    num_filename = requested_files.at[i + 1, 'Filename']
zipped_num_data = ZipFile(num_full_path)
zipped_num_full_path = zipped_num_data.infolist()[0].filename
num_data = np.frombuffer(zipped_num_data.open(zipped_num_full_path).read(),
np.dtype('uint8')).reshape(array_shape)
masked_dem_data[(num_data == 1) | (num_data == 2)] = water_value
i += 1
zipped_hgt_data = ZipFile(hgt_full_path)
dem_dataset = gdal.Open(hgt_full_path, gdal.GA_ReadOnly)
dem_data = dem_dataset.ReadAsArray()
masked_dem_data *= dem_data
metadata_dict[label]['WKT'] = dem_dataset.GetProjection()
metadata_dict[label]['GeoTransform'] = dem_dataset.GetGeoTransform()
else:
geo_transform = []
geo_transform.append(extents[0])
geo_transform.append(x_res)
geo_transform.append(0)
geo_transform.append(extents[-1])
geo_transform.append(0)
geo_transform.append(-y_res)
metadata_dict[label]['WKT'] = self._missing_data_projection
metadata_dict[label]['GeoTransform'] = geo_transform
masked_dem_data = np.full(shape=array_shape, fill_value=water_value)
i += 1
data_dict[label] = masked_dem_data
metadata_dict[label]['Geolocation'] = AffineGlobalCoords(metadata_dict[label]['GeoTransform'], center_pixels=True)
metadata_dict[label]['extents'] = extents
if self.store_geolocation_grids:
lat_coords, lon_coords = np.meshgrid(np.linspace(lat_start+1, lat_start, array_shape[0]),
np.linspace(lon_start, lon_start+1, array_shape[1]),
indexing = 'ij')
metadata_dict[label]['Latitude'] = lat_coords
metadata_dict[label]['Longitude'] = lon_coords
return ImageWrapper(obj_wrap = data_dict, meta_data = metadata_dict)
| mit |
yilei0620/3D_Conditional_Gan | GenSample_obj.py | 1 | 4544 | import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from sklearn.externals import joblib
import scipy
from scipy import io
# from matplotlib import pyplot as plt
# from sklearn.externals import joblib
import theano
import theano.tensor as T
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, conv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data
from lib.metrics import nnc_score, nnd_score
from load import load_shapenet_train, load_shapenet_test
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy
parameters = {'objectNumber': 2, 'Nz' : 200, 'Channel' :(1,64,128,256,512), 'kernal':(4,4,4,4), 'batchsize': 50, 'Convlayersize':(64,32,16,8,4), 'Genlrt' : 0.001, 'Discrimlrt' : 0.00001 , 'beta' : 0.5, 'l2':2.5e-5, 'Genk' : 2 , 'niter':50, 'niter_decay' : 150}
for p in parameters:
tmp = p + " = parameters[p]"
exec(tmp)
# print conditional,type(batchsize),Channel[-1],kernal
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
## filter_shape: (output channels, input channels, filter height, filter width, filter depth)
## load the parameters
# gen_params = [gw1, gw2, gw3, gw4, gw5, gwx]
# discrim_params = [dw1, dw2, dw3, dw4, dw5, dwy]
temp = joblib.load('models%d/50_gen_params.jl'%objectNumber)
gw1 = sharedX(temp[0])
gg1 = sharedX(temp[1])
gb1 = sharedX(temp[2])
gw2 = sharedX(temp[3])
gg2 = sharedX(temp[4])
gb2 = sharedX(temp[5])
gw3 = sharedX(temp[6])
gg3 = sharedX(temp[7])
gb3 = sharedX(temp[8])
gw4 = sharedX(temp[9])
gg4 = sharedX(temp[10])
gb4 = sharedX(temp[11])
gwx = sharedX(temp[12])
gen_params = [gw1, gg1, gb1, gw2, gg2, gb2, gw3, gg3, gb3, gw4 ,gg4, gb4, gwx]
##
def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
Gl1 = Gl1.reshape((Gl1.shape[0],Channel[-1],Convlayersize[-1],Convlayersize[-1],Convlayersize[-1]))
input_shape = (None , None,Convlayersize[-1],Convlayersize[-1],Convlayersize[-1])
filter_shape = (Channel[-1] , Channel[-2], kernal[-1], kernal[-1], kernal[-1])
Gl2 = relu(batchnorm(conv(Gl1,w2,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g2, b = b2))
input_shape = (None , None,Convlayersize[-2],Convlayersize[-2],Convlayersize[-2])
filter_shape = (Channel[-2] , Channel[-3], kernal[-2], kernal[-2], kernal[-2])
Gl3 = relu(batchnorm(conv(Gl2,w3,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g3, b = b3))
input_shape = (None , None,Convlayersize[-3],Convlayersize[-3],Convlayersize[-3])
filter_shape = (Channel[-3] , Channel[-4], kernal[-3], kernal[-3], kernal[-3])
Gl4 = relu(batchnorm(conv(Gl3,w4,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g4, b= b4))
input_shape = (None, None, Convlayersize[-4],Convlayersize[-4],Convlayersize[-4])
filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4], kernal[-4])
GlX = sigmoid(conv(Gl4,wx,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'))
return GlX
X = T.tensor5()
Z = T.matrix()
gX = gen(Z, *gen_params)
print 'COMPILING'
t = time()
# _train_g = theano.function([X, Z, Y], cost, updates=g_updates)
# _train_d = theano.function([X, Z, Y], cost, updates=d_updates)
_gen = theano.function([Z], gX)
print '%.2f seconds to compile theano functions'%(time()-t)
# trX, trY, ntrain = load_shapenet_train()
n = 10
nbatch = 10
rng = np.random.RandomState(int(time()))
# sample_ymb = floatX(np.asarray(np.eye(3)))
z_dist = scipy.io.loadmat('Z_dist_class2.mat')
z_mean = z_dist['mean']
z_mean = np.reshape(z_mean,(Nz,1))
z_std = z_dist['std']
z_std = np.reshape(z_std,(Nz,1))
def gen_z(z_dist,nbatch):
ret = np.zeros((nbatch,Nz))
for j in xrange(Nz):
z_tmp = np_rng.normal(z_mean[j],z_std[j],nbatch)
ret[:,j] = z_tmp
# print ret
return ret
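# Editorial note: gen_z draws each of the Nz latent dimensions independently from a
# normal distribution whose per-dimension mean and std were loaded from
# Z_dist_class2.mat above, rather than sampling from a standard N(0, I) prior.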
try:
os.mkdir('Gen_models%d'%objectNumber)
except:
pass
for j in xrange(n/nbatch):
sample_zmb = floatX(gen_z(z_dist,nbatch))
samples = np.asarray(_gen(sample_zmb))
for i in xrange(nbatch):
io.savemat('Gen_models%d/Gen_example_%d.mat'%(objectNumber,nbatch*j+i),{'instance':samples[i,:,:,:],'Z':sample_zmb[i,:]})
# niter = 1
# niter_decay = 1
| mit |
felipemontefuscolo/bitme | tactic/bitmex_dummy_tactic.py | 1 | 1028 | from common.quote import Quote
from tactic import TacticInterface, ExchangeInterface, Symbol, OrderCommon, Fill
import pandas as pd
class BitmexDummyTactic(TacticInterface):
"""
    This class is associated with orders issued by Bitmex
"""
def finalize(self) -> None:
pass
def handle_quote(self, quote: Quote) -> None:
pass
def handle_order_completed(self, order: OrderCommon) -> None:
pass
def handle_liquidation(self, pnl: float):
pass
def id(self):
return 'DUMMY'
def initialize(self, exchange: ExchangeInterface, preferences: dict) -> None:
pass
def get_symbol(self) -> Symbol:
pass
def handle_1m_candles(self, candles1m: pd.DataFrame) -> None:
pass
def handle_submission_error(self, failed_order: OrderCommon) -> None:
pass
def handle_fill(self, fill: Fill) -> None:
pass
def handle_cancel(self, order: OrderCommon) -> None:
pass
def handle_trade(self):
pass
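# Editorial sketch (not part of the original file): a concrete tactic overrides the
# no-op handlers above; the subclass below only counts fills. The class name and
# num_fills attribute are hypothetical, purely illustrative additions.
class FillCountingTactic(BitmexDummyTactic):
    def __init__(self):
        self.num_fills = 0
    def handle_fill(self, fill: Fill) -> None:
        # count every fill reported for this tactic's orders
        self.num_fills += 1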
| mpl-2.0 |
peastman/msmbuilder | msmbuilder/tests/test_kernel_approximation.py | 9 | 1158 | from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.kernel_approximation import Nystroem as NystroemR
from msmbuilder.decomposition.kernel_approximation import Nystroem, LandmarkNystroem
def test_nystroem_vs_sklearn():
np.random.seed(42)
X = np.random.randn(100, 5)
kernel = Nystroem(kernel='linear', random_state=42)
kernelR = NystroemR(kernel='linear', random_state=42)
y1 = kernel.fit_transform([X])[0]
y2 = kernelR.fit_transform(X)
assert_array_almost_equal(y1, y2)
def test_lndmrk_nystroem_approximation():
np.random.seed(42)
X = np.random.randn(100, 5)
u = np.arange(X.shape[0])[5::1]
v = np.arange(X.shape[0])[::1][:u.shape[0]]
lndmrks = X[np.unique((u, v))]
kernel = LandmarkNystroem(kernel='rbf', random_state=42)
kernelR = NystroemR(kernel='rbf', random_state=42)
y1_1 = kernel.fit_transform([X])[0]
kernel.landmarks = lndmrks
y1_2 = kernel.fit_transform([X])[0]
y2 = kernelR.fit_transform(X)
assert_array_almost_equal(y2, y1_1)
assert not all((np.abs(y2 - y1_2) > 1E-6).flatten())
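# Editorial sketch (not part of the original tests): the Nystroem feature map being
# compared above can be written out with plain numpy. With landmark rows L,
# W = k(L, L) and Phi = k(X, L) . W^(-1/2), so Phi . Phi^T approximates k(X, X).
# The helper name and the gamma value are illustrative assumptions.
def _nystroem_by_hand(X, landmarks, gamma=1.0):
    from sklearn.metrics.pairwise import rbf_kernel
    W = rbf_kernel(landmarks, landmarks, gamma=gamma)
    C = rbf_kernel(X, landmarks, gamma=gamma)
    # pseudo-inverse square root of the landmark kernel via SVD
    U, s, Vt = np.linalg.svd(W)
    W_inv_sqrt = np.dot(U / np.sqrt(np.maximum(s, 1e-12)), Vt)
    return np.dot(C, W_inv_sqrt)  # rows approximate the kernel-space features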
| lgpl-2.1 |
nmayorov/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
moutai/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
garywu/pypedream | pypedream/plot/_filt.py | 1 | 2685 | import numpy
has_matplotlib = True
try:
from matplotlib import pyplot, figure
except ImportError:
has_matplotlib = False
from dagpype._core import filters
def _make_relay_call(fn, name):
def new_fn(*args, **kwargs):
@filters
def _dagpype_internal_fn_act(target):
try:
while True:
target.send((yield))
except GeneratorExit:
fn(*args, **kwargs)
target.close()
return _dagpype_internal_fn_act
new_fn.__name__ = name
new_fn.__doc__ = """
Convenience filter utility for corresponding function in pyplot.
Example:
>>> source([1, 2, 3, 4]) | plot.xlabel('x') | plot.ylabel('y') | plot.title('xy') | (plot.plot() | plot.savefig('foo.png'))
"""
return new_fn
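# Each generated relay filter forwards every element unchanged to the next
# stage and defers the wrapped pyplot call until the stream is exhausted
# (GeneratorExit), so e.g. plot.title('xy') only labels the figure once all
# data has flowed through the pipe.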
_try_fns = [
'annotate',
'arrow',
'autogen_docstring',
'autoscale',
'autumn',
'axes',
'axhline',
'axhspan',
'axis',
'axvline',
'axvspan',
'barbs',
'bone',
'box',
'broken_barh',
'cla',
'clabel',
'clf',
'clim',
'cm',
'cohere',
'colorbar',
'colormaps',
'colors',
'connect',
'cool',
'copper',
'csd',
'dedent',
'delaxes',
'docstring',
'draw',
'figaspect',
'figimage',
'figlegend',
'figtext',
'figure',
'fill',
'fill_between',
'fill_betweenx',
'flag',
'gca',
'gcf',
'gci',
'get',
'gray',
'grid',
'hold',
'hot',
'hsv',
'jet',
'locator_params',
'margins',
'minorticks_off',
'minorticks_on',
'normalize',
'over',
'pcolor',
'pcolormesh',
'pink',
'plotfile',
'plotting',
'polar',
'prism',
'psd',
'quiver',
'quiverkey',
'rc',
'register_cmap',
'rgrids',
'sca',
'sci',
'set_cmap',
'setp',
'silent_list',
'specgram',
'spectral',
'spring',
'spy',
'stem',
'step',
'subplot',
'subplot2grid',
'subplot_tool',
'subplots',
'subplots_adjust',
'summer',
'suptitle',
'table',
'text',
'thetagrids',
'tick_params',
'ticklabel_format',
'tight_layout',
'title',
'tricontour',
'tricontourf',
'tripcolor',
'triplot',
'twinx',
'twiny',
'winter',
'xlabel',
'xlim',
'xscale',
'xticks',
'ylabel',
'ylim',
'yscale',
'yticks']
_fns = []
if has_matplotlib:
for fn in _try_fns:
try:
exec('%s = _make_relay_call(pyplot.%s, "%s")' % (fn, fn, fn))
_fns.append(fn)
except AttributeError:
pass
| bsd-3-clause |
nliolios24/textrank | share/doc/networkx-1.9.1/examples/graph/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of MultiDiGraph to hold edge data
of arbitrary Python objects (in this case the email message for each edge).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| mit |
imaculate/scikit-learn | sklearn/linear_model/randomized_l1.py | 11 | 24849 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
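# The scores_ returned above are selection frequencies: each entry is the
# fraction of the n_resampling randomized fits in which that feature was
# selected (e.g. a feature kept in 150 of 200 resamplings scores 0.75).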
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article, which corresponds to ``scaling`` here.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
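# A minimal usage sketch (illustrative only, not part of the original module),
# using the estimators defined in this module: RandomizedLasso scores can be
# thresholded to keep stable features, and lasso_stability_path returns the
# full score/alpha grid for plotting. The synthetic data and the alpha value
# below are assumptions for demonstration.
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(50, 10)
#   >>> y = X[:, 0] + 0.1 * rng.randn(50)
#   >>> clf = RandomizedLasso(alpha=0.025, random_state=0).fit(X, y)
#   >>> clf.scores_.shape
#   (10,)
#   >>> alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0)
#   >>> scores_path.shape[0] == X.shape[1]
#   True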
| bsd-3-clause |
prabhjyotsingh/incubator-zeppelin | flink/interpreter/src/main/resources/python/zeppelin_pyflink.py | 10 | 2806 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyflink.common import *
from pyflink.dataset import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
from pyflink.table.udf import *
import pyflink
from py4j.java_gateway import java_import
intp = gateway.entry_point
pyflink.java_gateway._gateway = gateway
pyflink.java_gateway.import_flink_view(gateway)
pyflink.java_gateway.install_exception_handler()
b_env = pyflink.dataset.ExecutionEnvironment(intp.getJavaExecutionEnvironment())
s_env = StreamExecutionEnvironment(intp.getJavaStreamExecutionEnvironment())
if intp.isFlink110():
bt_env = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("blink"), True)
bt_env_2 = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("flink"), False)
st_env = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("blink"), True)
st_env_2 = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("flink"), False)
else:
bt_env = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("blink"))
bt_env_2 = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("flink"))
st_env = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("blink"))
st_env_2 = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("flink"))
from zeppelin_context import PyZeppelinContext
#TODO(zjffdu) merge it with IPyFlinkZeppelinContext
class PyFlinkZeppelinContext(PyZeppelinContext):
def __init__(self, z, gateway):
super(PyFlinkZeppelinContext, self).__init__(z, gateway)
def show(self, obj, **kwargs):
from pyflink.table import Table
if isinstance(obj, Table):
if 'stream_type' in kwargs:
self.z.show(obj._j_table, kwargs['stream_type'], kwargs)
else:
print(self.z.showData(obj._j_table))
else:
super(PyFlinkZeppelinContext, self).show(obj, **kwargs)
z = __zeppelin__ = PyFlinkZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
| apache-2.0 |
prasunroypr/digit-recognizer | source/defs.py | 1 | 6607 | ################################################################################
"""
Functions for Digit Recognition
Created on Wed Jun 01 00:00:00 2016
@author: Prasun Roy
@e-mail: prasunroy.pr@gmail.com
"""
################################################################################
# import modules
import matplotlib.pyplot as pplt
import numpy as np
import os
import pandas as pd
import skimage.feature as skim
import sklearn.preprocessing as pp
import time
from conf import _config
from conf import _configinfo
################################################################################
def _fscale(data, split=False, load=False, verbose=False):
# initialize scaler
scaler = pp.MinMaxScaler()
# initialize variables
config = _configinfo()
sdpath = config['root_data_path'] + 'scaled.npy'
# scale data
if verbose: print('scaling features............... ', end = '')
data = np.array(data, dtype='float64')
if load and os.path.isfile(sdpath):
m = np.load(sdpath)[0]
r = np.load(sdpath)[1]
r[r==0] = 1
data = (data - m) / r
elif split:
train = data[:config['train_d']]
valid = data[config['train_d']:]
scaler.fit(train)
m = scaler.data_min_
r = scaler.data_range_
train = scaler.transform(train)
valid = scaler.transform(valid)
data = np.vstack((train, valid))
else:
data = scaler.fit_transform(data)
m = scaler.data_min_
r = scaler.data_range_
if verbose: print('done')
# save scaled config
if not load: np.save(sdpath, np.vstack((m, r)))
# return scaled data
return data
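# The two rows saved in scaled.npy are the per-feature minimum and range of
# the fitted data, so later calls with load=True reproduce exactly the same
# min-max mapping x' = (x - min) / range on new samples.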
################################################################################
def _haar(data, load=True, save=False, verbose=False):
return data
################################################################################
def _hogs(data, load=True, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
datapath = config['hogs_data_path']
data_hog = []
# load hog data if exists
if load and os.path.isfile(datapath):
if verbose: print('loading descriptors............ ', end = '')
data_hog = np.load(datapath)
if verbose: print('done')
# calculate hog data otherwise
else:
# initialize variables
ix = config['shape_x']
iy = config['shape_y']
bn = config['bins_n']
cx = config['cell_x']
cy = config['cell_y']
bw = config['blok_w']
bh = config['blok_h']
# perform hog
t_beg = time.time()
size = data.shape[0]
loop = 0
for image in data:
if verbose: print('\rextracting descriptors......... %d%%'
%(loop*100//size), end = '')
desc = skim.hog(image.reshape(ix, iy), orientations=bn,
pixels_per_cell=(cx, cy), cells_per_block=(bw, bh))
data_hog.append(desc)
loop = loop + 1
data_hog = np.array(data_hog, dtype='float64')
t_end = time.time()
if verbose: print('\rextracting descriptors......... done @ %8.2f sec'
%(t_end - t_beg))
# save data
if save:
if verbose: print('saving descriptors............. ', end = '')
np.save(datapath, data_hog)
if verbose: print('done')
# return hog
return data_hog
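# For reference, skimage.feature.hog returns one descriptor of length
# n_blocks_x * n_blocks_y * blok_w * blok_h * bins_n, with
# n_blocks_x = shape_x // cell_x - blok_w + 1 (likewise for y) under the
# default one-cell block stride. E.g. a 28x28 image with 7x7-pixel cells,
# 2x2-cell blocks and 9 bins gives 3 * 3 * 2 * 2 * 9 = 324 values; the actual
# sizes here come from the _config() values, so these figures are illustrative.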
################################################################################
def _sift(data, load=True, save=False, verbose=False):
return data
################################################################################
def _surf(data, load=True, save=False, verbose=False):
return data
################################################################################
def _plot(classifier, train, valid, step=None, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
if step is None: step = config['steps_d']
plot_figs_head = config['classifier'] + '-' + config['preprocess']
plot_data_path = config['plot_data_path']
plot_figs_path = config['plot_figs_path']
m_train = train.shape[0]
m_valid = valid.shape[0]
X_valid = valid[:, 1:]
y_valid = valid[:, 0]
error_train = []
error_valid = []
sizes_train = []
# calculate data for plot
for i in range(0, m_train, step):
if verbose: print('\rgenerating plot................ %d%%'
%(i*100//m_train), end = '')
# randomly shuffle training data
np.random.shuffle(train)
# select subset of randomized training data
X_train = train[:i+step, 1:]
y_train = train[:i+step, 0]
# train classifier with selected data
classifier.fit(X_train, y_train)
# cross-validate classifier
p_train = classifier.predict(X_train)
p_valid = classifier.predict(X_valid)
# estimate errors
error_train.append(sum(y_train != p_train) / len(y_train))
error_valid.append(sum(y_valid != p_valid) / m_valid)
sizes_train.append(i+step)
error_train = np.array(error_train, dtype='float64')
error_valid = np.array(error_valid, dtype='float64')
sizes_train = np.array(sizes_train, dtype='uint32')
if verbose: print('\rgenerating plot................ done')
# plot data
pplt.plot(sizes_train, error_train, 'rs-', label='training error')
pplt.plot(sizes_train, error_valid, 'gs-', label='cross-validation error')
pplt.title(plot_figs_head.upper()+' Learning Curve')
pplt.xlabel('number of training instances')
pplt.ylabel('classification error')
pplt.legend()
xmin,xmax = pplt.xlim()
ymin,ymax = pplt.ylim()
pplt.axis([xmin, xmax+step, ymin, ymax+0.01])
pplt.grid(True)
# save data
if save:
if verbose: print('saving plot.................... ', end = '')
data = pd.DataFrame({'x1_TrainSizes':sizes_train,
'y1_TrainError':error_train,
'y2_ValidError':error_valid})
data.to_csv(plot_data_path, index=False)
pplt.savefig(plot_figs_path)
if verbose: print('done')
# display plot
pplt.show()
################################################################################
| gpl-3.0 |
endolith/scikit-image | skimage/feature/tests/test_util.py | 35 | 2818 | import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from numpy.testing import assert_equal, assert_raises
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_prepare_grayscale_input_2D,
_mask_border_keypoints, plot_matches)
def test_feature_detector():
assert_raises(NotImplementedError, FeatureDetector().detect, None)
def test_descriptor_extractor():
assert_raises(NotImplementedError, DescriptorExtractor().extract,
None, None)
def test_prepare_grayscale_input_2D():
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 3, 3)))
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1)))
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1, 1)))
img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
def test_mask_border_keypoints():
keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 0),
[1, 1, 1, 1, 1])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 2),
[0, 0, 1, 1, 1])
assert_equal(_mask_border_keypoints((4, 4), keypoints, 2),
[0, 0, 1, 0, 0])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 5),
[0, 0, 0, 0, 0])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 4),
[0, 0, 0, 0, 1])
@np.testing.decorators.skipif(plt is None)
def test_plot_matches():
fig, ax = plt.subplots(nrows=1, ncols=1)
shapes = (((10, 10), (10, 10)),
((10, 10), (12, 10)),
((10, 10), (10, 12)),
((10, 10), (12, 12)),
((12, 10), (10, 10)),
((10, 12), (10, 10)),
((12, 12), (10, 10)))
keypoints1 = 10 * np.random.rand(10, 2)
keypoints2 = 10 * np.random.rand(10, 2)
idxs1 = np.random.randint(10, size=10)
idxs2 = np.random.randint(10, size=10)
matches = np.column_stack((idxs1, idxs2))
for shape1, shape2 in shapes:
img1 = np.zeros(shape1)
img2 = np.zeros(shape2)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
only_matches=True)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
keypoints_color='r')
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
matches_color='r')
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| bsd-3-clause |
alexsavio/scikit-learn | examples/model_selection/plot_roc_crossval.py | 21 | 3477 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=lw, color=color,
label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
chaowu2009/stereo-vo | tools/capture_TwoCameras_saveImagesOnly.py | 1 | 2289 | import numpy as np
import cv2
import time
import matplotlib.pylab as plt
"""
Make sure that you hold the checkerboard horizontally (more checkers horizontally than vertically).
In order to get a good calibration you will need to move the checkerboard around in the camera frame such that:
the checkerboard is detected at the left and right edges of the field of view (X calibration)
the checkerboard is detected at the top and bottom edges of the field of view (Y calibration)
the checkerboard is detected at various angles to the camera ("Skew")
the checkerboard fills the entire field of view (Size calibration)
checkerboard tilted to the left, right, top and bottom (X,Y, and Size calibration)
"""
left = 1
right = 2
time_in_ms= 1000/100
#folder = "/home/cwu/Downloads/";
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
#folder = "D:/vision/stereo-calibration/calib_imgs/ARC/"
fp = open(folder + "timeStamp.txt","w")
WIDTH = 1280
HEIGHT = 720
WIDTH = 640
HEIGHT = 480
for counter in range(1,31):
millis = int(round(time.time() * 1000))
cap1 = cv2.VideoCapture(left)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame1 = cap1.read()
cap1.release()
cap2 = cv2.VideoCapture(right)
cap2.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap2.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame2 = cap2.read()
cap2.release()
#frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
#frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
plt.subplot(121)
plt.imshow(frame1)
plt.title('left')
plt.subplot(122)
plt.imshow(frame2)
plt.title('right')
plt.show()
print('another capture', counter)
cv2.waitKey(100)
cv2.imwrite(folder + "img_left/left_" + str(counter) + ".jpg", frame1)
cv2.waitKey(time_in_ms)
cv2.imwrite(folder + "img_right/right_" + str(counter) + ".jpg", frame2)
fp.write(str(counter)+ ","+ str(millis) + "\n")
print("the ", counter, " pairs")
cv2.destroyAllWindows()
fp.close()
print('All Done \n')
| mit |
kevin-intel/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 10 | 16467 | """
Testing Recursive feature elimination
"""
from operator import attrgetter
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.feature_selection import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR, LinearSVR
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GroupKFold
from sklearn.compose import TransformedTargetRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
from sklearn.utils._testing import ignore_warnings
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier:
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, y):
assert len(X) == len(y)
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, y=None):
return 0.
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def _more_tags(self):
return {"allow_nan": True}
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert len(rfe.ranking_) == X.shape[1]
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert len(rfe.ranking_) == X.shape[1]
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert X_r.shape == iris.data.shape
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert rfe.score(X, y) == clf.score(iris.data, iris.target)
assert_array_almost_equal(X_r, X_r_sparse.toarray())
@pytest.mark.parametrize("n_features_to_select", [-1, 2.1])
def test_rfe_invalid_n_features_errors(n_features_to_select):
clf = SVC(kernel="linear")
iris = load_iris()
rfe = RFE(estimator=clf, n_features_to_select=n_features_to_select,
step=0.1)
msg = f"n_features_to_select must be .+ Got {n_features_to_select}"
with pytest.raises(ValueError, match=msg):
rfe.fit(iris.data, iris.target)
def test_rfe_percent_n_features():
# test that the results are the same
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# there are 10 features in the data. We select 40%.
clf = SVC(kernel="linear")
rfe_num = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe_num.fit(X, y)
rfe_perc = RFE(estimator=clf, n_features_to_select=0.4, step=0.1)
rfe_perc.fit(X, y)
assert_array_equal(rfe_perc.ranking_, rfe_num.ranking_)
assert_array_equal(rfe_perc.support_, rfe_num.support_)
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert len(rfe.ranking_) == X.shape[1]
assert X_r.shape == iris.data.shape
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert len(rfecv.grid_scores_) == X.shape[1]
assert len(rfecv.ranking_) == X.shape[1]
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# In the event of cross validation score ties, the expected behavior of
# RFECV is to return the FEWEST features that maximize the CV score.
# Because test_scorer always returns 1.0 in this example, RFECV should
# reduce the dimensionality to a single feature (i.e. n_features_ = 1)
assert rfecv.n_features_ == 1
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2)
rfecv.fit(X, y)
assert len(rfecv.grid_scores_) == 6
assert len(rfecv.ranking_) == X.shape[1]
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Verifying that steps < 1 don't blow up.
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=.2)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert len(rfecv.grid_scores_) == X.shape[1]
assert len(rfecv.ranking_) == X.shape[1]
def test_rfecv_verbose_output():
# Check verbose=1 is producing an output.
from io import StringIO
import sys
sys.stdout = StringIO()
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, verbose=1)
rfecv.fit(X, y)
verbose_output = sys.stdout
verbose_output.seek(0)
assert len(verbose_output.readline()) > 0
def test_rfecv_grid_scores_size():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Non-regression test for varying combinations of step and
# min_features_to_select.
for step, min_features_to_select in [[2, 1], [2, 2], [3, 3]]:
rfecv = RFECV(estimator=MockClassifier(), step=step,
min_features_to_select=min_features_to_select)
rfecv.fit(X, y)
score_len = np.ceil(
(X.shape[1] - min_features_to_select) / step) + 1
assert len(rfecv.grid_scores_) == score_len
assert len(rfecv.ranking_) == X.shape[1]
assert rfecv.n_features_ >= min_features_to_select
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert rfe._estimator_type == "classifier"
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert score.min() > .7
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert sel.support_.sum() == n_features // 2
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert sel.support_.sum() == n_features // 2
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert sel.support_.sum() == n_features // 2
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert (np.max(rfe.ranking_) ==
formula1(n_features, n_features_to_select, step))
assert (np.max(rfe.ranking_) ==
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step)
rfecv.fit(X, y)
assert (rfecv.grid_scores_.shape[0] ==
formula1(n_features, n_features_to_select, step))
assert (rfecv.grid_scores_.shape[0] ==
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
def test_rfe_cv_groups():
generator = check_random_state(0)
iris = load_iris()
number_groups = 4
groups = np.floor(np.linspace(0, number_groups, len(iris.target)))
X = iris.data
y = (iris.target > 0).astype(int)
est_groups = RFECV(
estimator=RandomForestClassifier(random_state=generator),
step=1,
scoring='accuracy',
cv=GroupKFold(n_splits=2)
)
est_groups.fit(X, y, groups=groups)
assert est_groups.n_features_ > 0
@pytest.mark.parametrize(
'importance_getter',
[attrgetter('regressor_.coef_'), 'regressor_.coef_'])
@pytest.mark.parametrize('selector, expected_n_features',
[(RFE, 5), (RFECV, 4)])
def test_rfe_wrapped_estimator(importance_getter, selector,
expected_n_features):
# Non-regression test for
# https://github.com/scikit-learn/scikit-learn/issues/15312
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
estimator = LinearSVR(random_state=0)
log_estimator = TransformedTargetRegressor(regressor=estimator,
func=np.log,
inverse_func=np.exp)
selector = selector(log_estimator, importance_getter=importance_getter)
sel = selector.fit(X, y)
assert sel.support_.sum() == expected_n_features
@pytest.mark.parametrize(
"importance_getter, err_type",
[("auto", ValueError),
("random", AttributeError),
(lambda x: x.importance, AttributeError),
([0], ValueError)]
)
@pytest.mark.parametrize("Selector", [RFE, RFECV])
def test_rfe_importance_getter_validation(importance_getter, err_type,
Selector):
X, y = make_friedman1(n_samples=50, n_features=10, random_state=42)
estimator = LinearSVR()
log_estimator = TransformedTargetRegressor(
regressor=estimator, func=np.log, inverse_func=np.exp
)
with pytest.raises(err_type):
model = Selector(log_estimator, importance_getter=importance_getter)
model.fit(X, y)
@pytest.mark.parametrize("cv", [None, 5])
def test_rfe_allow_nan_inf_in_x(cv):
iris = load_iris()
X = iris.data
y = iris.target
# add nan and inf value to X
X[0][0] = np.NaN
X[0][1] = np.Inf
clf = MockClassifier()
if cv is not None:
rfe = RFECV(estimator=clf, cv=cv)
else:
rfe = RFE(estimator=clf)
rfe.fit(X, y)
rfe.transform(X)
def test_w_pipeline_2d_coef_():
pipeline = make_pipeline(StandardScaler(), LogisticRegression())
data, y = load_iris(return_X_y=True)
sfm = RFE(pipeline, n_features_to_select=2,
importance_getter='named_steps.logisticregression.coef_')
sfm.fit(data, y)
assert sfm.transform(data).shape[1] == 2
@pytest.mark.parametrize('ClsRFE', [
RFE,
RFECV
])
def test_multioutput(ClsRFE):
X = np.random.normal(size=(10, 3))
y = np.random.randint(2, size=(10, 2))
clf = RandomForestClassifier(n_estimators=5)
rfe_test = ClsRFE(clf)
rfe_test.fit(X, y)
| bsd-3-clause |
Mako-kun/mangaki | mangaki/mangaki/utils/svd.py | 2 | 5410 | from django.contrib.auth.models import User
from mangaki.models import Rating, Work, Recommendation
from mangaki.utils.chrono import Chrono
from mangaki.utils.values import rating_values
from scipy.sparse import lil_matrix
from sklearn.utils.extmath import randomized_svd
import numpy as np
from django.db import connection
import pickle
import json
import math
NB_COMPONENTS = 10
TOP = 10
class MangakiSVD(object):
M = None
U = None
sigma = None
VT = None
chrono = None
inv_work = None
inv_user = None
work_titles = None
def __init__(self):
self.chrono = Chrono(True)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f)
def load(self, filename):
with open(filename, 'rb') as f:
backup = pickle.load(f)
self.M = backup.M
self.U = backup.U
self.sigma = backup.sigma
self.VT = backup.VT
self.inv_work = backup.inv_work
self.inv_user = backup.inv_user
self.work_titles = backup.work_titles
def fit(self, X, y):
self.work_titles = {}
for work in Work.objects.values('id', 'title'):
self.work_titles[work['id']] = work['title']
work_ids = list(Rating.objects.values_list('work_id', flat=True).distinct())
nb_works = len(work_ids)
self.inv_work = {work_ids[i]: i for i in range(nb_works)}
user_ids = list(User.objects.values_list('id', flat=True))
nb_users = len(user_ids)
self.inv_user = {user_ids[i]: i for i in range(nb_users)}
self.chrono.save('get_work_ids')
# print("Computing M: (%i × %i)" % (nb_users, nb_works))
self.M = lil_matrix((nb_users, nb_works))
"""ratings_of = {}
for (user_id, work_id), rating in zip(X, y):
ratings_of.setdefault(user_id, []).append(rating)"""
for (user_id, work_id), rating in zip(X, y):
self.M[self.inv_user[user_id], self.inv_work[work_id]] = rating #- np.mean(ratings_of[user_id])
# np.save('backupM', self.M)
self.chrono.save('fill matrix')
# Ranking computation
self.U, self.sigma, self.VT = randomized_svd(self.M, NB_COMPONENTS, n_iter=3, random_state=42)
        # print('Shapes', self.U.shape, self.sigma.shape, self.VT.shape)
self.save('backup.pickle')
self.chrono.save('factor matrix')
def predict(self, X):
y = []
for user_id, work_id in X:
i = self.inv_user[user_id]
j = self.inv_work[work_id]
y.append(self.U[i].dot(np.diag(self.sigma)).dot(self.VT.transpose()[j]))
return np.array(y)
def get_reco(self, username, sending=False):
target_user = User.objects.get(username=username)
the_user_id = target_user.id
svd_user = User.objects.get(username='svd')
work_ids = {self.inv_work[work_id]: work_id for work_id in self.inv_work}
nb_works = len(work_ids)
seen_works = set(Rating.objects.filter(user__id=the_user_id).exclude(choice='willsee').values_list('work_id', flat=True))
the_i = self.inv_user[the_user_id]
self.chrono.save('get_seen_works')
        print('my vector (size %d)' % len(self.U[the_i]), self.U[the_i])
print(self.sigma)
for i, line in enumerate(self.VT):
            print('=> Row %d' % (i + 1), '(my rating: %f)' % self.U[the_i][i])
sorted_line = sorted((line[j], self.work_titles[work_ids[j]]) for j in range(nb_works))[::-1]
top5 = sorted_line[:10]
bottom5 = sorted_line[-10:]
for anime in top5:
print(anime)
for anime in bottom5:
print(anime)
"""if i == 0 or i == 1: # First two vectors explaining variance
with open('vector%d.json' % (i + 1), 'w') as f:
vi = X.dot(line).tolist()
x_norm = [np.dot(X.data[k], X.data[k]) / (nb_works + 1) for k in range(nb_users + 1)]
f.write(json.dumps({'v': [v / math.sqrt(x_norm[k]) if x_norm[k] != 0 else float('inf') for k, v in enumerate(vi)]}))"""
# print(VT.dot(VT.transpose()))
# return
the_ratings = self.predict((the_user_id, work_ids[j]) for j in range(nb_works))
ranking = sorted(zip(the_ratings, [(work_ids[j], self.work_titles[work_ids[j]]) for j in range(nb_works)]), reverse=True)
# Summarize the results of the ranking for the_user_id:
# “=> rank, title, score”
c = 0
for i, (rating, (work_id, title)) in enumerate(ranking, start=1):
if work_id not in seen_works:
print('=>', i, title, rating, self.predict([(the_user_id, work_id)]))
if Recommendation.objects.filter(user=svd_user, target_user__id=the_user_id, work__id=work_id).count() == 0:
Recommendation.objects.create(user=svd_user, target_user_id=the_user_id, work_id=work_id)
c += 1
elif i < TOP:
print(i, title, rating)
if c >= TOP:
break
"""print(len(connection.queries), 'queries')
for line in connection.queries:
print(line)"""
self.chrono.save('complete')
def __str__(self):
return '[SVD]'
def get_shortname(self):
return 'svd'
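# Minimal sketch (illustration only, not used by Mangaki itself): the low-rank model
# above predicts a rating as the (user, work) entry of U . diag(sigma) . VT. The toy
# matrix below is made up; only numpy and randomized_svd imported above are assumed.
def _demo_low_rank_prediction():
    M = np.random.rand(20, 15)  # toy user x work rating matrix
    U, sigma, VT = randomized_svd(M, NB_COMPONENTS, n_iter=3, random_state=42)
    approx = U.dot(np.diag(sigma)).dot(VT)  # rank-NB_COMPONENTS reconstruction of M
    return approx[3, 7]  # predicted rating of work 7 by user 3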
| agpl-3.0 |
MartialD/hyperspy | hyperspy/drawing/tiles.py | 4 | 2899 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from hyperspy.drawing.figure import BlittedFigure
from hyperspy.drawing import utils
class HistogramTilePlot(BlittedFigure):
def __init__(self):
self.figure = None
self.title = ''
self.ax = None
def create_axis(self, ncols=1, nrows=1, number=1, title=''):
ax = self.figure.add_subplot(ncols, nrows, number)
ax.set_title(title)
ax.hspy_fig = self
return ax
def plot(self, db, **kwargs):
if self.figure is None:
self.create_figure()
ncomps = len(db)
if not ncomps:
return
else:
self.update(db, **kwargs)
def update(self, db, **kwargs):
ncomps = len(db)
# get / set axes
i = -1
for c_n, v in db.items():
i += 1
ncols = len(v)
istart = ncols * i
j = 0
for p_n, (hist, bin_edges) in v.items():
j += 1
mask = hist > 0
if np.any(mask):
title = c_n + ' ' + p_n
ax = self.create_axis(ncomps, ncols, istart + j, title)
self.ax = ax
# remove previous
while ax.patches:
ax.patches[0].remove()
# set new; only draw non-zero height bars
ax.bar(
bin_edges[
:-1][mask],
hist[mask],
np.diff(bin_edges)[mask],
# animated=True,
**kwargs)
width = bin_edges[-1] - bin_edges[0]
ax.set_xlim(
bin_edges[0] - width * 0.1, bin_edges[-1] + width * 0.1)
ax.set_ylim(0, np.max(hist) * 1.1)
# ax.set_title(c_n + ' ' + p_n)
self.figure.canvas.draw_idle()
def close(self):
try:
plt.close(self.figure)
except BaseException:
pass
self.figure = None
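# Usage sketch (an assumption, not taken from HyperSpy's documentation): ``db`` is a
# nested mapping {component_name: {parameter_name: (hist, bin_edges)}}, which is the
# structure the update() loop above iterates over. BlittedFigure is assumed to
# provide create_figure(), since plot() relies on it.
def _demo_histogram_tile_plot():
    hist, bin_edges = np.histogram(np.random.normal(size=1000), bins=20)
    db = {'Gaussian': {'sigma': (hist, bin_edges)}}
    tile_plot = HistogramTilePlot()
    tile_plot.plot(db)  # draws one bar-chart tile per (component, parameter) pair
    return tile_plot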
| gpl-3.0 |
Rocamadour7/ml_tutorial | 05. Clustering/titanic-data-example.py | 1 | 1721 | import numpy as np
from sklearn.cluster import KMeans
from sklearn import preprocessing
import pandas as pd
'''
Pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
survival Survival (0 = No; 1 = Yes)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare (British pound)
cabin Cabin
embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
boat Lifeboat
body Body Identification Number
home.dest Home/Destination
'''
df = pd.read_excel('titanic.xls')
df.drop(['body', 'name'], 1, inplace=True)
df.fillna(0, inplace=True)
def handle_non_numerical_data(df):
columns = df.columns.values
for column in columns:
text_digit_vals = {}
def convert_to_int(val):
return text_digit_vals[val]
if df[column].dtype != np.int64 and df[column].dtype != np.float64:
column_contents = df[column].values.tolist()
unique_elements = set(column_contents)
x = 0
for unique in unique_elements:
if unique not in text_digit_vals:
text_digit_vals[unique] = x
x += 1
df[column] = list(map(convert_to_int, df[column]))
return df
df = handle_non_numerical_data(df)
X = np.array(df.drop(['survived'], 1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['survived'])
clf = KMeans(n_clusters=2)
clf.fit(X)
correct = 0
for i in range(len(X)):
predict_me = np.array(X[i].astype(float))
predict_me = predict_me.reshape(-1, len(predict_me))
prediction = clf.predict(predict_me)
if prediction[0] == y[i]:
correct += 1
print(correct/len(X))
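# Quick illustration (toy data, not from the Titanic file): handle_non_numerical_data
# replaces every non-numeric column with arbitrary integer codes, which is what lets
# KMeans run on the mixed-type columns above.
def _demo_handle_non_numerical_data():
    toy = pd.DataFrame({'sex': ['male', 'female', 'male'], 'fare': [7.25, 71.28, 8.05]})
    return handle_non_numerical_data(toy)  # 'sex' becomes integer codes, 'fare' is unchanged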
| mit |
huytd/dejavu | dejavu/fingerprint.py | 1 | 6020 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
    detected_peaks = local_max & ~eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
fingerprinted = set() # to avoid rehashing same pairs
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks) and not (i, i + j) in fingerprinted:
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
# ensure we don't repeat hashing
fingerprinted.add((i, i + j))
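# Usage sketch (assumptions: a mono channel of PCM samples as a 1-D array, and the
# Python 2 / older-NumPy environment this module targets; the 440 Hz tone is synthetic,
# not part of dejavu).
def _demo_fingerprint_tone():
    t = np.arange(0, 1.0, 1.0 / DEFAULT_FS)
    channel = 0.5 * np.sin(2 * np.pi * 440.0 * t)  # one second of a 440 Hz sine
    hashes = list(fingerprint(channel))            # [(sha1_prefix, time_offset), ...]
    return hashes[:5]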
| mit |
xuleiboy1234/autoTitle | tensorflow/tensorflow/examples/learn/wide_n_deep_tutorial.py | 18 | 8111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tempfile
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
"education", [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
"marital_status", [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
"relationship", [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
"workclass", [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# Continuous base columns.
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
gender, education, marital_status, relationship, workclass, occupation,
native_country, age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
["education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, "education", "occupation"], hash_bucket_size=1000),
tf.feature_column.crossed_column(
["native_country", "occupation"], hash_bucket_size=1000)
]
deep_columns = [
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(gender),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(native_country, dimension=8),
tf.feature_column.embedding_column(occupation, dimension=8),
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
]
def maybe_download(train_data, test_data):
"""Maybe downloads training data and returns train and test file names."""
if train_data:
train_file_name = train_data
else:
train_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
train_file.name) # pylint: disable=line-too-long
train_file_name = train_file.name
train_file.close()
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
test_file.name) # pylint: disable=line-too-long
test_file_name = test_file.name
test_file.close()
print("Test data is downloaded to %s"% test_file_name)
return train_file_name, test_file_name
def build_estimator(model_dir, model_type):
"""Build an estimator."""
if model_type == "wide":
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns + crossed_columns)
elif model_type == "deep":
m = tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=[100, 50])
else:
m = tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=crossed_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
return m
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
"""Train and evaluate the model."""
train_file_name, test_file_name = maybe_download(train_data, test_data)
# Specify file path below if want to find the output easily
model_dir = tempfile.mkdtemp() if not model_dir else model_dir
m = build_estimator(model_dir, model_type)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
steps=train_steps)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
# Manual cleanup
shutil.rmtree(model_dir)
FLAGS = None
def main(_):
train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
FLAGS.train_data, FLAGS.test_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--model_dir",
type=str,
default="",
help="Base directory for output models."
)
parser.add_argument(
"--model_type",
type=str,
default="wide_n_deep",
help="Valid model types: {'wide', 'deep', 'wide_n_deep'}."
)
parser.add_argument(
"--train_steps",
type=int,
default=2000,
help="Number of training steps."
)
parser.add_argument(
"--train_data",
type=str,
default="",
help="Path to the training data."
)
parser.add_argument(
"--test_data",
type=str,
default="",
help="Path to the test data."
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
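# Example invocation (illustrative, using only the flags defined above; the census
# train/test CSVs are downloaded automatically when --train_data/--test_data are
# left empty):
#   python wide_n_deep_tutorial.py --model_type=wide_n_deep --train_steps=2000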
| mit |
joshloyal/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/tools-sentiment/word_freq.py | 1 | 4759 | import nltk
import pandas as pd
import re
from googletrans import Translator
from unicodedata import normalize
def read_csv(file):
df1 = pd.DataFrame.from_csv('files_extern/%s.csv'%(file),sep=';',index_col=0,encoding ='ISO-8859-1')
df1 = df1.reset_index()
return df1
def write_csv(data,file):
df = pd.DataFrame(data)
df.to_csv('files_extern/'+file+'.csv', mode='w', sep=';',index=False, header=False,encoding='utf8')
def clear(dataframe):
new_df_tweet = []
new_df_sent = []
zipped = zip(dataframe['tweet'],dataframe['opiniao'])
for (df,opiniao) in zipped:
expr = re.sub(r"http\S+", "", df)
#expr = re.sub(r"[@#]\S+","",expr)
expr = normalize('NFKD',expr).encode('ASCII','ignore').decode('ASCII')
filtrado = [w for w in nltk.regexp_tokenize(expr.lower(),"[^0-9\W_]+") if not w in nltk.corpus.stopwords.words('portuguese')]
for f in filtrado:
if len(f) >= 2:
#print(f)
#print(opiniao)
new_df_tweet.append(f)
new_df_sent.append(opiniao)
new_df = pd.DataFrame()
new_df['tokens'] = new_df_tweet
new_df['sentimento'] = new_df_sent
return new_df
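# Small illustration (the tweet below is made up, not from the dataset): clear()
# strips URLs, normalizes accents, tokenizes, and drops Portuguese stopwords,
# returning one row per remaining token. Requires the NLTK stopword corpus, as above.
def _demo_clear():
    toy = pd.DataFrame({'tweet': ['Adorei o produto! http://t.co/xyz'], 'opiniao': [1]})
    return clear(toy)  # tokens such as 'adorei' and 'produto', each paired with sentiment 1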
def convert_df(df):
new_df = []
for d in df:
if d == 'Positivo':
new_df.append(1)
elif d == 'Neutro':
new_df.append(0)
elif d == 'Negativo':
new_df.append(-1)
return new_df
def exlusivos(vet_neg,vet_neu,vet_pos):
ex_pos = []
ex_neg = []
ex_neu = []
tupla = zip(vet_neg,vet_neu,vet_pos)
for (neg,neu,pos) in tupla:
if not (neg in vet_pos or neg in vet_neu):
ex_neg.append(neg)
if not (neu in vet_neg or neu in vet_pos):
ex_neu.append(neu)
if not (pos in vet_neg or pos in vet_neu):
ex_pos.append(pos)
print(ex_neg)
print(ex_neu)
print(ex_pos)
return ex_neg, ex_neu, ex_pos
def bigram(frases, vet_neg, vet_neu, vet_pos):
    # The original body was left unfinished ("if f.find()"); the completion below is
    # an assumption: collect (word, sentence) pairs for the frequent words of each class.
    bi_neg, bi_neu, bi_pos = [], [], []
    for f in frases:
        bi_neg += [(w, f) for w in vet_neg if f.find(w) != -1]
        bi_neu += [(w, f) for w in vet_neu if f.find(w) != -1]
        bi_pos += [(w, f) for w in vet_pos if f.find(w) != -1]
    return bi_neg, bi_neu, bi_pos
if __name__ == '__main__':
df_tweets = read_csv('dataset-portuguese')
df_tweets['opiniao'] = convert_df(df_tweets['opiniao'])
df_words = clear(df_tweets)
neg = df_words.loc[df_words['sentimento'] == -1]
neu = df_words.loc[df_words['sentimento'] == 0]
pos = df_words.loc[df_words['sentimento'] == 1]
neg_freq = nltk.FreqDist(neg['tokens'])
neu_freq = nltk.FreqDist(neu['tokens'])
pos_freq = nltk.FreqDist(pos['tokens'])
vet_neg = []
vet_neu = []
vet_pos = []
#neg_freq.plot(50, cumulative=False)
#neu_freq.plot(50, cumulative=False)
#pos_freq.plot(50, cumulative=False)
#print(neg_freq.most_common(30))
#print('------------------------')
#print(neu_freq.most_common(30))
#print('------------------------')
#print(pos_freq.most_common(30))
tupla = zip(neg_freq.most_common(len(neg)),neu_freq.most_common(len(neu)),pos_freq.most_common(len(pos)))
df_neg = pd.DataFrame()
df_neu = pd.DataFrame()
df_pos = pd.DataFrame()
words_neg = dict()
words_neu = dict()
words_pos = dict()
words_neg['pt'] = []
words_neg['en'] = []
words_neg['es'] = []
words_neu['pt'] = []
words_neu['en'] = []
words_neu['es'] = []
words_pos['pt'] = []
words_pos['en'] = []
words_pos['es'] = []
#neg_freq.plot(30, cumulative=False)
translator = Translator(service_urls=['translate.google.com','translate.google.com.br'])
for (ng,nu,ps) in tupla:
vet_neg.append(ng[0])
vet_neu.append(nu[0])
vet_pos.append(ps[0])
vet_neg, vet_neu,vet_pos = exlusivos(vet_neg,vet_neu,vet_pos)
tupla = zip(vet_neg[:50],vet_neu[:50],vet_pos[:50])
for (ng,nu,ps) in tupla:
words_neg['pt'].append(ng)
en=translator.translate(ng, dest='en').text
words_neg['en'].append(en)
words_neg['es'].append(translator.translate(en, dest='es').text)
words_neu['pt'].append(nu)
en=translator.translate(nu, dest='en').text
words_neu['en'].append(en)
words_neu['es'].append(translator.translate(en, dest='es').text)
words_pos['pt'].append(ps)
en=translator.translate(ps, dest='en').text
words_pos['en'].append(en)
words_pos['es'].append(translator.translate(en, dest='es').text)
df_neg['pt'] = words_neg['pt']
df_neg['en'] = words_neg['en']
df_neg['es'] = words_neg['es']
df_neu['pt'] = words_neu['pt']
df_neu['en'] = words_neu['en']
df_neu['es'] = words_neu['es']
df_pos['pt'] = words_pos['pt']
df_pos['en'] = words_pos['en']
df_pos['es'] = words_pos['es']
write_csv(df_neg,'bigram_neg')
write_csv(df_neu,'bigram_neu')
write_csv(df_pos,'bigram_pos')
| gpl-3.0 |
pypot/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
hahnicity/ace | chapter1/problem3.py | 1 | 1222 | """
Problem 3.
Calculate the time series
yt = 5 + .05 * t + Et (where Et is the error term)
for the years 1960, 1961, ..., 2001, assuming Et is independently and
identically distributed with mean 0 and sigma 0.2.
"""
from random import uniform
from matplotlib.pyplot import plot, show
from numpy import array, polyfit, poly1d
def create_distribution(size):
"""
Create a distribution, identically distributed, with mean 0 and
sigma 0.2
"""
    # Approximate with a uniform distribution on [-0.2, 0.2]: this has mean 0, but its
    # standard deviation is 0.2/sqrt(3) ~= 0.115 rather than the 0.2 requested above.
return array([uniform(-0.2, .2) for _ in xrange(size)])
def create_time_series(start_year, end_year):
"""
Create the time series, yt, then perform a regress on yt, plot yt and the
its trendline
"""
t_array = array(range(start_year, end_year + 1))
epsilon_t = create_distribution(len(t_array))
yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])
fit = polyfit(t_array, yt, 1)
fit_func = poly1d(fit)
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
show()
def main():
create_time_series(1960, 2001)
if __name__ == "__main__":
main()
| unlicense |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/examples/pfb/interpolate.py | 17 | 8253 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = gr.firdes.low_pass_2(self._interp, self._interp*self._fs, freq2+50, 50,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = gr.firdes.low_pass_2(flt_size, flt_size*self._fs, freq2+50, 150,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
self.signal = gr.add_cc()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = blks2.pfb_interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = blks2.pfb_arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = gr.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = gr.vector_sink_c()
self.snk2 = gr.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
gclenaghan/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
tbtraltaa/medianshape | medianshape/simplicial/surfgen.py | 1 | 10038 | # encoding: utf-8
'''
2D surface embedded in 3D
-------------------------
'''
from __future__ import absolute_import
import importlib
import os
import numpy as np
from medianshape.simplicial import pointgen3d, mesh, utils
from medianshape.simplicial.meshgen import meshgen2d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from medianshape.viz import plot2d, plot3d
from distmesh.plotting import axes_simpplot3d
from medianshape.simplicial.utils import boundary_points
def func(x, y, sign=1):
'''
:math:`\sin\pi x \cos \pi y`.
'''
return np.sin(np.pi*x)*np.cos(np.pi*y)
def sample_surf(scale, step=0.2):
'''
Returns a tuple X, Y, Z of a surface for an experiment.
'''
x = y = np.arange(-4.0, 4.0, step)
X, Y = np.meshgrid(x, y)
from matplotlib.mlab import bivariate_normal
'''
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z3 = bivariate_normal(X, Y, 1, 1, -2, -2)
Z = Z2 - Z1
'''
# Ups
ZU1 = bivariate_normal(X,Y, 1.5, 1, 0,-2)
ZU2 = bivariate_normal(X, Y, 1.5, 1.5, 4, 1)
ZU3 = bivariate_normal(X, Y, 1, 1, -4, 1)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 4, 0.5, 0, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1.5, 1, 0, 1)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -4, -2)
ZD3 = bivariate_normal(X, Y, 1, 1, 4, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, 4)
Z1 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax1 = np.abs(np.amax(Z1))
Z1 = Z1/Zmax1 * scale[2]
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
# Ups
ZU1 = bivariate_normal(X,Y, 2, 1, 1,1)
ZU2 = bivariate_normal(X, Y, 3, 1, -2, 4)
ZU3 = bivariate_normal(X, Y, 1.5, 1.5, -2, -2)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 2, 2, 3, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1, 2, 4, 2)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -2, 2)
ZD3 = bivariate_normal(X, Y, 1.5, 1.5, 1, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, -4)
Z2 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax2 = np.abs(np.amax(Z2))
Z2 = Z2/Zmax2 * scale[2]
X = X * scale[0]/4.0
Y = Y * scale[1]/4.0
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
return X, Y, Z1, Z2
def interpolate_surf(points, values, ipoints, method="nearest"):
    '''
    Used to interpolate a sample surface to a surface in a mesh.
    '''
    from scipy.interpolate import griddata
    return griddata(points, values, ipoints, method=method)
def surfgen_shared_boundary(bbox=[-10,-10,-10, 10,10,10], l=3):
'''
Generates two surfaces in 3D with shared boundary for an experiment.
Writes the two surface as .poly file for tetgen.
'''
# Generating point grids for two surfaces
xmin = bbox[0]
xmax = bbox[3]
ymin = bbox[1]
ymax = bbox[4]
zmin = bbox[2]
zmax = bbox[5]
Xmin, Ymin, Zmin, Xmax, Ymax, Zmax = np.array(bbox)*0.8
X, Y, Z1, Z2 = sample_surf([Xmax, Ymax, zmax*0.3], step=0.8)
Z1 = Z1 + zmax*0.4
Z2 = Z2 - zmax*0.4
    # Symmetric surfaces
#Z2 = -Z1 - zmax*0.4
'''
# Plotting the two surfaces
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.scatter(X, Y, Z1.reshape(-1,1), color='b')
surf = ax.scatter(X, Y, Z2.reshape(-1,1), color='r')
plt.show()
'''
mesh = meshgen2d([Xmin, Ymin, Xmax, Ymax], l, include_corners=True)
sample_points = np.hstack((X.reshape(-1,1), Y.reshape(-1,1)))
# Interpolating the surface mesh into two different surfaces
    # similar to the sample surfaces generated before
Z1 = interpolate_surf(sample_points, Z1.reshape(-1,1), mesh.points)
Z2 = interpolate_surf(sample_points, Z2.reshape(-1,1), mesh.points)
# Integrating two surfaces
points1 = np.hstack((mesh.points, Z1))
print points1.shape
points2 = np.hstack((mesh.points, Z2))
print points2.shape
corners = utils.boundary_points(bbox)
midcorners = utils.mid_corners(bbox)
offset1 = len(corners) +len(midcorners) + 1
offset2 = len(corners) + len(midcorners) + len(points1) + 1
points = np.concatenate((corners, midcorners, points1, points2), axis=0)
print points.shape
triangles1 = mesh.simplices + offset1
triangles2 = mesh.simplices + offset2
    # Adding the indices of the points as the last column of the coordinate list
Xmin_s1 = np.argwhere(points1[:,0]==Xmin)
Xmin_s1_points = np.hstack((points1[Xmin_s1.reshape(-1,)], Xmin_s1))
    # Sorting the indices such that the points are in increasing order of their y-component
Xmin_s1 = (Xmin_s1_points[:,3][np.argsort(Xmin_s1_points[:,1])] + offset1).astype(int)
Xmin_s2 = np.argwhere(points2[:,0]==Xmin)
Xmin_s2_points = np.hstack((points2[Xmin_s2.reshape(-1,)], Xmin_s2))
Xmin_s2 = (Xmin_s2_points[:,3][np.argsort(Xmin_s2_points[:,1])] + offset2).astype(int)
Xmax_s1 = np.argwhere(points1[:,0]==Xmax)
Xmax_s1_points = np.hstack((points1[Xmax_s1.reshape(-1,)], Xmax_s1))
Xmax_s1 = (Xmax_s1_points[:,3][np.argsort(Xmax_s1_points[:,1])] + offset1).astype(int)
Xmax_s2 = np.argwhere(points2[:,0]==Xmax)
Xmax_s2_points = np.hstack((points2[Xmax_s2.reshape(-1,)], Xmax_s2))
Xmax_s2 = (Xmax_s2_points[:,3][np.argsort(Xmax_s2_points[:,1])] + offset2).astype(int)
Ymin_s1 = np.argwhere(points1[:,1]==Ymin)
Ymin_s1_points = np.hstack((points1[Ymin_s1.reshape(-1,)], Ymin_s1))
Ymin_s1 = (Ymin_s1_points[:,3][np.argsort(Ymin_s1_points[:,0])] + offset1).astype(int)
Ymin_s2 = np.argwhere(points2[:,1]==Ymin)
Ymin_s2_points = np.hstack((points2[Ymin_s2.reshape(-1,)], Ymin_s2))
Ymin_s2 = (Ymin_s2_points[:,3][np.argsort(Ymin_s2_points[:,0])] + offset2).astype(int)
Ymax_s1 = np.argwhere(points1[:,1]==Ymax)
Ymax_s1_points = np.hstack((points1[Ymax_s1.reshape(-1,)], Ymax_s1))
Ymax_s1 = (Ymax_s1_points[:,3][np.argsort(Ymax_s1_points[:,0])] + offset1).astype(int)
Ymax_s2 = np.argwhere(points2[:,1]==Ymax)
Ymax_s2_points = np.hstack((points2[Ymax_s2.reshape(-1,)], Ymax_s2))
Ymax_s2 = (Ymax_s2_points[:,3][np.argsort(Ymax_s2_points[:,0])] + offset2).astype(int)
for i in range(len(Xmin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Xmin_s1[i], Xmin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Xmin_s1[-1], 12]))
for i in range(len(Xmin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Xmin_s2[i], Xmin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Xmin_s2[-1], 12]))
for i in range(len(Xmax_s1)-1):
triangles1 = np.vstack((triangles1, [10, Xmax_s1[i], Xmax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [10, Xmax_s1[-1], 11]))
for i in range(len(Xmax_s2)-1):
triangles2 = np.vstack((triangles2, [10, Xmax_s2[i], Xmax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [10, Xmax_s2[-1], 11]))
for i in range(len(Ymin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Ymin_s1[i], Ymin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Ymin_s1[-1], 10]))
for i in range(len(Ymin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Ymin_s2[i], Ymin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Ymin_s2[-1], 10]))
for i in range(len(Ymax_s1)-1):
triangles1 = np.vstack((triangles1, [12, Ymax_s1[i], Ymax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [12, Ymax_s1[-1], 11]))
for i in range(len(Ymax_s2)-1):
triangles2 = np.vstack((triangles2, [12, Ymax_s2[i], Ymax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [12, Ymax_s2[-1], 11]))
triangles = np.vstack((triangles1, triangles2))
    # Prepare the PLC and save it to a .poly file for tetgen
with open( os.environ['HOME'] +'/mediansurf.poly', 'w') as f:
f.write("#Part 1 - the node list\n")
f.write("#%d nodes in 3d, no attributes, no boundary marker\n"%points.shape[0])
f.write('%d %d %d %d\n'%(points.shape[0], 3, 0,0))
for i, p in enumerate(points):
f.write("%d %f %f %f\n"%(i+1, p[0], p[1], p[2]))
        # Each of the 4 sides contributes 3 polygons
        # Top and bottom
        # Each triangle of the two surfaces is a facet
fn = 6 + len(triangles)
f.write("#Part 2 - the facet list.\n")
f.write("#%d facets with boundary markers\n"%fn)
f.write('%d %d\n'%(fn, 1))
f.write("#Boundary facet list.\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 1 2 3 4\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 5 6 7 8\n")
#xmin side
f.write("2 0 1\n")
f.write("4 1 4 8 5\n")
f.write("2 9 12\n")
#ymin side
f.write("2 0 1\n")
f.write("4 1 2 6 5\n")
f.write("2 9 10\n")
#xmax side
f.write("2 0 1\n")
f.write("4 2 3 7 6\n")
f.write("2 10 11\n")
#ymax side
f.write("2 0 1\n")
f.write("4 3 4 8 7\n")
f.write("2 11 12\n")
f.write("#Facet list of surface1.\n")
for t in triangles1:
f.write("%d %d %d\n"%(1, 0, -1))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Facet list of surface2.\n")
for t in triangles2:
f.write("%d %d %d\n"%(1, 0, -2))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Part 3 - the hole list.\n")
f.write('%d\n'%0)
f.write("#Part 4 - the region list.\n")
f.write('%d\n'%0)
if __name__ == "__main__":
surfgen_shared_boundary()
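# Note (assumption, not part of this script): the generated mediansurf.poly is meant
# to be tetrahedralized with tetgen, typically with an invocation along the lines of
#   tetgen -pq mediansurf.poly
# where -p reads the PLC and -q requests a quality mesh; the exact switches depend on
# the tetgen version and the desired mesh quality.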
| gpl-3.0 |
rbharath/pande-gas | vs_utils/utils/dragon_utils.py | 3 | 5800 | """
Dragon utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from cStringIO import StringIO
import numpy as np
import os
import pandas as pd
import subprocess
import tempfile
from vs_utils.utils import SmilesGenerator
class Dragon(object):
"""
Wrapper for dragon6shell.
Parameters
----------
subset : str, optional (default '2d')
Descriptor subset.
kwargs : dict, optional
Keyword arguments for SmilesGenerator.
"""
def __init__(self, subset='2d', **kwargs):
self.subset = subset
self.initialized = False
self.config_filename, self.smiles_engine = None, None
self.smiles_engine_kwargs = kwargs
def initialize(self):
"""
Initialize.
This is not part of __init__ because it breaks IPython.parallel.
"""
fd, self.config_filename = tempfile.mkstemp()
os.close(fd)
with open(self.config_filename, 'wb') as f:
f.write(self.get_config())
self.smiles_engine = SmilesGenerator(**self.smiles_engine_kwargs)
self.initialized = True
def __del__(self):
"""
Cleanup.
"""
if self.config_filename is not None:
os.unlink(self.config_filename)
def get_config(self):
"""
Get configuration file.
"""
if self.subset == '2d':
return """<?xml version="1.0" encoding="utf-8"?>
<DRAGON version="6.0.36" script_version="1" generation_date="2014/11/17">
<OPTIONS>
<CheckUpdates value="true"/>
<SaveLayout value="true"/>
<ShowWorksheet value="false"/>
<Decimal_Separator value="."/>
<Missing_String value="NaN"/>
<DefaultMolFormat value="1"/>
<HelpBrowser value="/usr/bin/xdg-open"/>
<RejectUnusualValence value="false"/>
<Add2DHydrogens value="false"/>
<MaxSRforAllCircuit value="19"/>
<MaxSR value="35"/>
<MaxSRDetour value="30"/>
<MaxAtomWalkPath value="2000"/>
<LogPathWalk value="true"/>
<LogEdge value="true"/>
<Weights>
<weight name="Mass"/>
<weight name="VdWVolume"/>
<weight name="Electronegativity"/>
<weight name="Polarizability"/>
<weight name="Ionization"/>
<weight name="I-State"/>
</Weights>
<SaveOnlyData value="false"/>
<SaveLabelsOnSeparateFile value="false"/>
<SaveFormatBlock value="%b - %n.txt"/>
<SaveFormatSubBlock value="%b-%s - %n - %m.txt"/>
<SaveExcludeMisVal value="false"/>
<SaveExcludeAllMisVal value="false"/>
<SaveExcludeConst value="false"/>
<SaveExcludeNearConst value="false"/>
<SaveExcludeStdDev value="false"/>
<SaveStdDevThreshold value="0.0001"/>
<SaveExcludeCorrelated value="false"/>
<SaveCorrThreshold value="0.95"/>
<SaveExclusionOptionsToVariables value="false"/>
<SaveExcludeMisMolecules value="false"/>
<SaveExcludeRejectedMolecules value="false"/>
</OPTIONS>
<DESCRIPTORS>
<block id="1" SelectAll="true"/>
<block id="2" SelectAll="true"/>
<block id="3" SelectAll="true"/>
<block id="4" SelectAll="true"/>
<block id="5" SelectAll="true"/>
<block id="6" SelectAll="true"/>
<block id="7" SelectAll="true"/>
<block id="8" SelectAll="true"/>
<block id="9" SelectAll="true"/>
<block id="10" SelectAll="true"/>
<block id="11" SelectAll="true"/>
<block id="12" SelectAll="true"/>
<block id="21" SelectAll="true"/>
<block id="22" SelectAll="true"/>
<block id="23" SelectAll="true"/>
<block id="24" SelectAll="true"/>
<block id="25" SelectAll="true"/>
<block id="28" SelectAll="true"/>
<block id="29" SelectAll="true"/>
</DESCRIPTORS>
<MOLFILES>
<molInput value="stdin"/>
<molInputFormat value="SMILES"/>
</MOLFILES>
<OUTPUT>
<SaveStdOut value="true"/>
<SaveProject value="false"/>
<SaveFile value="false"/>
<logMode value="stderr"/>
</OUTPUT>
</DRAGON>
"""
else:
raise NotImplementedError
def get_descriptors(self, mols):
"""
Parameters
----------
mols : array_like
Molecules.
"""
if not self.initialized:
self.initialize()
smiles = [self.smiles_engine.get_smiles(mol) for mol in mols]
args = ['dragon6shell', '-s', self.config_filename]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate('\n'.join(smiles))
if not stdout:
raise RuntimeError(stderr)
data, names = self.parse_descriptors(stdout)
# adjust for skipped molecules
# descriptors are in same order as smiles
missing = np.setdiff1d(smiles, names)
features = np.zeros(len(smiles), dtype=object)
idx = 0 # index into calculated features
for i, this_smiles in enumerate(smiles):
if this_smiles in missing:
features[i] = None
else:
assert this_smiles == names[idx] # confirm match
features[i] = data[idx]
idx += 1
assert len(features) == len(mols)
return features
def parse_descriptors(self, string):
"""
Parse Dragon descriptors.
Parameters
----------
string : str
Output from dragon6shell.
"""
df = pd.read_table(StringIO(string))
if self.subset == '2d':
del df['nHBonds'], df['Psi_e_1d'], df['Psi_e_1s']
# extract names
names = df['NAME'].values
# delete No. and NAME columns
del df['No.'], df['NAME']
return np.asarray(df, dtype=float), names
| bsd-3-clause |
suraj-jayakumar/lstm-rnn-ad | src/testdata/random_data_time_series/generate_data.py | 1 | 1042 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 11:15:12 2016
@author: suraj
"""
import random
import numpy as np
import pickle
import matplotlib.pyplot as plt
attachRateList = []
for i in range(3360):
attachRateList.append(random.uniform(4,6))
attachRateList = np.array(attachRateList)
encoded_attach_rate_list = np.fft.fft(attachRateList)
day_number_list = [i%7 for i in range(3360)]
encoded_day_number_list = np.fft.fft(day_number_list)
time_number_list = [i%96 for i in range(3360)]
encoded_time_number_list = np.fft.fft(time_number_list)
final_list_x = np.array([[encoded_day_number_list.real[i],encoded_day_number_list.imag[i],encoded_time_number_list.real[i],encoded_time_number_list.imag[i],encoded_attach_rate_list.real[i],encoded_attach_rate_list.imag[i]] for i in range(3360)])
final_list_y = [ (encoded_attach_rate_list[i].real,encoded_attach_rate_list[i].imag) for i in range(len(encoded_attach_rate_list)) ]
pickle.dump(final_list_x,open('x_att.p','wb'))
pickle.dump(final_list_y,open('y_att.p','wb'))
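# (Sketch, not in the original script) the pickles written above can be
# reloaded later with, e.g.:
#   x_att = pickle.load(open('x_att.p', 'rb'))
#   y_att = pickle.load(open('y_att.p', 'rb'))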
| apache-2.0 |
hackthemarket/pystrat | sim.py | 1 | 10697 | # simple trading strategy simulator
import pandas as pd
from pandas.tools.plotting import autocorrelation_plot
from pandas.tools.plotting import scatter_matrix
import numpy as np
from scipy import stats
import sklearn
from sklearn import preprocessing as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)
import sys
import time
import logging as log
log.basicConfig(level=log.DEBUG)
import glob
import os.path
import pickle
import random
import pdb
pd.set_option('display.width',500)
# define constant friction function
DefaultBPS = 10
def FrictionInBps(U, cfg, kvargs):
""" default FrictionInBps function just returns default,
but the interface receives all strategy info after
strategy is run, so one can create more realistic
impact models """
return DefaultBPS
""" default simulator cfg dictionary.
default keys/values:
FrictionInBps - function that takes same args as strategy.
by default, returns DefaultBps.
InitBal - in $s
Reinvest - should we reinvest our winnings or constantly assume we have InitBal?
Verbose
"""
DEF_SIM_CFG= { 'FrictionInBps': FrictionInBps,
'Verbose' : True,
'InitBal' : 1e7,
'Reinvest' : True }
# columns in prepped univ
SIM_COLS = ["Sym","Product","Instrument",
"Multiplier","Expiry","Strike",
"Open","High","Low","Close","Volume"]
SIM_COLS_OUT = ["Prev_Weight", "Weight", "Prev_Qty", "Qty",
"Trade_Qty", "Trade_Fric", "PNL", "NET_PNL"]
SIM_COL_BALS =[ "NAV","Friction","PNL","NET_PNL", "Longs","Shorts",
"Long_Dlrs","Short_Dlrs","Num_Trades","Turnover","NET_Return"]
def squarem( df, sym='Sym', min_pct=.9 ) :
    # squarem solves the common problem in which you have a large table of
# data grouped by symbols, some of which have missing data. You want to
# 'square' the data such that any symbol which is missing 'too much' data
# is expunged and the remaining data is filled appropriately, leaving you
# with a dataset which has the same # of observations for each symbol.
#
bysyms = df.groupby(sym).size()
idx = df.index.unique()
onumsyms = len(bysyms)
    minlen = int(round(len(idx) * min_pct))
keep = bysyms[bysyms > minlen]
u = df[ df[sym].isin(keep.index) ]
numsyms = len(keep)
    log.info('Got rid of %d/%d symbols', (onumsyms - numsyms), onumsyms)
u.replace(0,np.nan,inplace=True)
u.replace([np.inf, -np.inf], np.nan,inplace=True)
u.sort_index(inplace=True)
uidx = u.index.unique()
# groupby and reindex magic
z = u.groupby(sym).apply(
lambda x: x.reindex(uidx).ffill()).reset_index(0,drop=True)
# badz = z[z.isnull().any(axis=1)]
# if len(badz.index) > 0 :
# badtimes = badz.index.unique().values
# z.drop( badtimes, inplace=True )
# for dt in badtimes:
# log.info('removed %s for NaNs',pd.to_datetime(str(dt)).strftime(
# '%Y-%m-%d'))
return z
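# Illustrative sketch (hypothetical helper, not part of the original module):
# builds a tiny universe in which one symbol is missing most rows, then
# squares it with squarem(); 'BBB' gets dropped and 'AAA' is forward-filled.
def _squarem_demo():
    idx = pd.date_range('2016-01-04', periods=10)
    full = pd.DataFrame({'Sym': 'AAA', 'Close': np.arange(1., 11.)}, index=idx)
    sparse = pd.DataFrame({'Sym': 'BBB', 'Close': [1., 2.]}, index=idx[:2])
    return squarem(pd.concat([full, sparse]))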
def prep_univ( dateTime, symbol,
open, high, low, close, volume,
product, instrument='STK', multiplier=1.0,expiry=None,
strike=None,adv_days=20,sd_days=20, open2close_returns=True,
scaleAndCenter=False, **more_cols) :
# constructs universe appropriate for use with simulator; any additional columns
# passed-in via ellipsis will be added to table as named
#
U = pd.DataFrame({'Sym': symbol,
'Product' : product, 'Instrument':instrument,
                      'Multiplier': multiplier, 'Expiry': expiry, 'Strike': strike,
'Open':open,'High':high, 'Low':low, 'Close':close,
'Volume':volume }, index=dateTime )
U = U[ SIM_COLS ]
if len(more_cols) > 0:
U = pd.concat( [U, pd.DataFrame(more_cols)], axis=1 )
U.reset_index( inplace=True)
U.sort_values(['Sym','Date'],inplace=True)
U.Date = pd.to_datetime(U.Date)
U.set_index('Date',inplace=True)
if scaleAndCenter :
log.debug('prep_univ: scaling & centering')
raw_scaled = U.groupby('Sym').transform(
lambda x : (x - x.mean())/x.std())
        U = pd.concat([U.Sym, raw_scaled], axis=1)
# calculate adv, returns, fwd_returns & change in volume
U['ADV'] = U.groupby('Sym')['Volume'].apply(
pd.rolling_mean, adv_days, 1).shift()
U['DeltaV'] = U.groupby('Sym')['Volume'].transform(
lambda x : np.log(x / x.shift()) )
U['Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()) )
U['Fwd_Close'] = U.groupby('Sym')['Close'].shift(-1)
U['Fwd_Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()).shift(-1) ) # fwd.returns
U['SD'] = U.groupby('Sym')['Return'].apply(
pd.rolling_std, sd_days, 1).shift()
if open2close_returns:
U['Fwd_Open'] = U.groupby('Sym')['Open'].shift(-1)
U['Fwd_COReturn'] = np.divide(np.add( U.Fwd_Open, -U.Close ),U.Close)
U.ffill(inplace=True)
U.sort_index(inplace=True)
return U
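# Usage sketch (hypothetical column names, not part of the original module):
# given a flat table of daily bars with Date/Sym/OHLCV columns, the universe
# is built simply by passing the columns through.
def _prep_univ_demo(bars):
    return prep_univ(bars.Date, bars.Sym, bars.Open, bars.High, bars.Low,
                     bars.Close, bars.Volume, product='Equity')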
# simple, default strategy: equal weight universe on daily basis
def eq_wt( U, cfg, kvargs ) :
#pdb.set_trace()
U.Weight = 1/float(len(U.index))
return U
# given today's Universe U and Yesterday's Y, set U's
# Prev_Weight and Prev_Qty to Y's Weight & Qty
# TODO: clean-up
def _getprevs( U, Y ) :
# TODO: surely there's a cleaner way to do this...
wts = Y.reset_index()[['Sym','Weight']]
wts.columns = ['Sym','Prev_Weight']
pwts = U[['Sym']].merge( wts, on = 'Sym' )['Prev_Weight']
U.Prev_Weight=pwts.values
qts = Y.reset_index()[['Sym','Qty']]
qts.columns = ['Sym','Prev_Qty']
pqts = U[['Sym']].merge( qts, on = 'Sym' )['Prev_Qty']
U.Prev_Qty=pqts.values
# functor to run strategy each day and update tbls ...
# TODO: clean-up
def __sim ( U, FUN, cfg, B, kvargs) :
# run sim to set weights
U = FUN( U, cfg, kvargs)
# set prev values for weight & qty...
Y = kvargs.pop('_Y', None)
if Y is not None and not np.all(Y.index==U.index):
_getprevs(U,Y)
loop = 1 + int(kvargs.pop('_L'))
else:
loop = 0
kvargs['_L'] = loop
kvargs['_Y'] = U
bb = B.iloc[loop]
# fill-out trade details
NAV = bb.NAV
tospend = NAV/U.Weight
U.Qty = np.round((NAV*U.Weight) / (U.Multiplier*U.Close))
U.Trade_Qty = U.Qty - U.Prev_Qty
fbps = 1e-4 * cfg['FrictionInBps'](U,cfg,kvargs)
U.Trade_Fric = U.Trade_Qty * U.Close * U.Multiplier * fbps
U.PNL = (U.Fwd_Close - U.Close) * U.Qty * U.Multiplier
U.NET_PNL = U.PNL - U.Trade_Fric
# today's balances are based on yesterday's posns...
longs = U[U.Qty > 0]
shorts = U[U.Qty < 0]
trades = U[U.Trade_Qty != 0]
bb.Friction = U.Trade_Fric.sum()
bb.PNL = U.PNL.sum()
bb.NET_PNL = U.NET_PNL.sum()
bb.Longs = len(longs.index)
bb.Shorts = len(shorts.index)
bb.Long_Dlrs = (longs.Close * longs.Multiplier * longs.Qty).sum()
bb.Short_Dlrs = (shorts.Close * shorts.Multiplier * shorts.Qty).sum()
bb.Num_Trades = len(trades.index)
bb.Turnover = (trades.Close * trades.Multiplier
* trades.Trade_Qty.abs()).sum()/NAV
if loop > 0 :
yb = B.iloc[loop-1]
ynav = yb.NAV
tnav = ynav + yb.NET_PNL
bb.NAV = tnav
bb.NET_Return = (tnav-ynav)/ynav
B.iloc[loop] = bb
# pdb.set_trace()
return U
def sim( univ, sim_FUN=eq_wt, cfg=DEF_SIM_CFG.copy(), kvargs={} ) :
""" simulator: runs simulation and returns a table of activity and balances.
args:
univ - historical data that's been produced by prep_univ
sim_FUN - strategy function. by default, equal weights univ.
cfg - cfg info. by default
kvargs - strat-specific extra data in a dict
"""
#
t0 = time.time()
all_times = univ.index.unique().values
# prepare writable/output side of universe
W = pd.DataFrame( columns=SIM_COLS_OUT, index = univ.index).fillna(0.0)
U = pd.concat( [univ, W], axis=1 )
# create balances table: one per day
B = pd.DataFrame( columns = SIM_COL_BALS, index = all_times ).fillna(0.0)
B.NAV = cfg['InitBal']
# 'daily' loop
Z = U.groupby(U.index).apply( __sim, FUN=sim_FUN,
cfg=cfg, B=B, kvargs=kvargs )
log.info('ran over %d days and %d rows in %d secs', len(all_times),
len(U.index),time.time()-t0)
# summarize results a bit more...?
#ts=xts(B$Net.Return,order.by=B$DateTime)
# return universe and balances
#list(U=U,B=B, ts=ts)
return Z, B
def sharpe(Returns) :
return np.sqrt(252) * np.mean(Returns)/np.std(Returns)
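# Illustrative tie-in (hypothetical helper, not in the original module): run
# the default equal-weight sim over a prepped universe and report the
# annualized Sharpe ratio of its daily net returns.
def _sharpe_demo(U):
    _, B = sim(U)
    return sharpe(B.NET_Return.values)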
def random_strat( U, cfg, kvargs ) :
# random portfolio strategy: picks 'num_names' randomly
nnames = kvargs.get('num_names',10)
names = random.sample(U.Sym, nnames )
U.Weight = np.where( U.Sym.isin( names ), 1/float(nnames), 0 )
return U
def best_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
    best = U.sort_values('Return', ascending=False,
                         na_position='last')['Sym'].head(nnames).values
U.Weight = np.where( U.Sym.isin( best ), 1/float(nnames), 0 )
return U
def worst_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
    worst = U.sort_values('Return', ascending=True,
                          na_position='last')['Sym'].head(nnames).values
U.Weight = np.where( U.Sym.isin( worst ), 1/float(nnames), 0 )
return U
def rtest(U,FUN=random_strat, runs=10):
# run given strat repeatedly, plotting NAVs and Returning them
# nb: this only makes sense if the strategy is random...
# run random_strat 'runs' times and plot NAVs
N = None
for i in range(runs) :
_,b = sim( U, sim_FUN=FUN )
n = pd.DataFrame(b.NAV)
N = n if N is None else pd.concat([N,n],axis=1)
N.plot(legend=False)
return N
def sim_test():
# dev driver
f = 'U.pkl'
P = pickle.load(open(f))
log.info('loaded <%s>',f)
P.describe()
U = P[P.index >= '2005-01-01']
U.describe()
import sim
_,B = sim.sim(U)
#plot NAV
B.NAV.plot(title='Equal Weight Everyone')
return B
| gpl-3.0 |
alexeyum/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
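if __name__ == '__main__':
    # Usage sketch (not part of the original module): assumes the MLComp
    # "20news-18828" archive has been unzipped under $MLCOMP_DATASETS_HOME;
    # substitute whatever dataset name or numeric id you have locally.
    news_train = load_mlcomp('20news-18828', 'train')
    print('%d training documents in %d categories'
          % (len(news_train.filenames), len(news_train.target_names)))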
| bsd-3-clause |
Gabriel-p/mcs_rot_angles | aux_modules/validation_set.py | 1 | 10176 |
import os
from astropy.io import ascii
from astropy.table import Table
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Change path so that we can import functions from the 'modules/' folder.
sys.path.insert(0, sys.path[0].replace('aux_', ''))
import readData
import MCs_data
def zDist(N):
"""
This function generates a uniform spread of vertical distances, in the
range (-z_dist, +z_dist).
"""
# Define maximum vertical distance (in parsec)
z_dist = 5000.
# Generate N random z' vertical distances, in parsec.
# To generate the *same* values each time the code is executed, fix the
# random seed to any integer value.
# np.random.seed(12345)
z_prime = np.random.uniform(-z_dist, z_dist, N)
return z_prime
def invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime):
"""
Inverted distance in parsecs (D) from Eq (7) in
van der Marel & Cioni (2001) using Eqs (1), (2), (3).
"""
# Express everything in radians.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
ra_0, dec_0, ra, dec = ra_0.rad, dec_0.rad, np.deg2rad(ra), np.deg2rad(dec)
# cos(rho)
A = np.cos(dec) * np.cos(dec_0) * np.cos(ra - ra_0) +\
np.sin(dec) * np.sin(dec_0)
# sin(rho) * cos(phi)
B = -np.cos(dec) * np.sin(ra - ra_0)
# sin(rho) * sin(phi)
C = np.sin(dec) * np.cos(dec_0) -\
np.cos(dec) * np.sin(dec_0) * np.cos(ra - ra_0)
# Eq (7)
D = (z_prime - D_0.value * np.cos(incl)) /\
(np.sin(incl) * (C * np.cos(theta) - B * np.sin(theta)) -
A * np.cos(incl))
return D
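# Written out, the inversion above is
#   D = (z' - D_0 cos(i)) /
#       (sin(i) * (sin(rho) sin(phi) cos(theta) - sin(rho) cos(phi) sin(theta))
#        - cos(rho) cos(i))
# i.e. Eq (7) of van der Marel & Cioni (2001), as cited in the docstring,
# solved for the distance D given the out-of-plane offset z'.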
def rho_phi(ra, dec, glx_ctr):
"""
Obtain the angular distance between (ra, dec) coordinates and the center
of the galaxy (rho), and its position angle (phi).
"""
# Store clusters' (ra, dec) coordinates in degrees.
coords = SkyCoord(list(zip(*[ra, dec])), unit=(u.deg, u.deg))
rho = coords.separation(glx_ctr)
# Position angle between center and coordinates. This is the angle between
# the positive y axis (North) counter-clockwise towards the negative x
# axis (East).
Phi = glx_ctr.position_angle(coords)
# This is the angle measured counter-clockwise from the x positive axis
# (West).
phi = Phi + Angle('90d')
return rho, phi
def xyz_coords(rho, phi, D_0, r_dist):
'''
Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001),
Eq (5).
Values (x, y,z) returned in Kpc.
'''
d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
unit=u.kpc)
x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian)
y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian)
z = D_0.kpc * u.kpc - d_kpc * np.cos(rho.radian)
x, y, z = x.value, y.value, z.value
return np.array([x, y, z])
def outData(gal, gal_data, dist_mod, e_dm):
"""
Write data to output 'xxx_input_synth.dat' file ('xxx' stands for the
processed galaxy.)
"""
data = Table(
[gal_data['Name'], gal_data['ra'], gal_data['dec'], dist_mod, e_dm,
gal_data['log(age)']],
names=['Name', 'ra', 'dec', 'dist_mod', 'e_dm', 'log(age)'])
with open(gal.lower() + "_input_synth.dat", 'w') as f:
ascii.write(data, f, format='fixed_width', delimiter=' ')
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
"""
Inverse set of equations. Transform inclined plane system (x',y',z')
into face on sky system (x,y,z).
"""
x = x_p * np.cos(theta) - y_p * np.cos(inc) * np.sin(theta) -\
z_p * np.sin(inc) * np.sin(theta)
y = x_p * np.sin(theta) + y_p * np.cos(inc) * np.cos(theta) +\
z_p * np.sin(inc) * np.cos(theta)
z = -1. * y_p * np.sin(inc) + z_p * np.cos(inc)
return x, y, z
def make_plot(gal_name, incl, theta, cl_xyz, dm):
"""
Original link for plotting intersecting planes:
http://stackoverflow.com/a/14825951/1391441
"""
# Make plot.
fig = plt.figure()
ax = Axes3D(fig)
# Placement 0, 0 is the bottom left, 1, 1 is the top right.
ax.text2D(
0.4, 0.95, r"${}:\;(\Theta, i) = ({}, {})$".format(
gal_name, theta - 90., incl),
transform=ax.transAxes, fontsize=15, color='red')
# Express in radians for calculations.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
# Plot clusters.
x_cl, y_cl, z_cl = cl_xyz
SC = ax.scatter(x_cl, z_cl, y_cl, c=dm, s=50)
min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
min_Z, max_Z = min(z_cl) - 2., max(z_cl) + 2.
# x,y plane.
X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
Z = np.zeros((2, 2))
# Plot x,y plane.
ax.plot_surface(X, Z, Y, color='gray', alpha=.1, linewidth=0, zorder=1)
# Axis of x,y plane.
# x axis.
ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
# Arrow head pointing in the positive x direction.
ax.quiver(max_X, 0., 0., max_X, 0., 0., arrow_length_ratio=.5,
length=.1, color='k')
ax.text(max_X, 0., -.5, 'x', 'x')
# y axis.
ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
# Arrow head pointing in the positive y direction.
ax.quiver(0., 0., max_Y, 0., 0., max_Y, arrow_length_ratio=.8,
length=.1, color='k')
ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
ax.text(-.5, 0., max_Y, 'y', 'y')
#
# A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
a, b, c, d = -1. * np.sin(theta) * np.sin(incl),\
np.cos(theta) * np.sin(incl), np.cos(incl), 0.
# print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
# Rotated plane.
X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
Z2_t = (-a * X2_t - b * Y2_t) / c
X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
Z2_b = (-a * X2_b - b * Y2_b) / c
# Top half of first x',y' inclined plane.
ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.1, lw=0, zorder=3)
# Bottom half of inclined plane.
ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.1, lw=0, zorder=-1)
# Axis of x',y' plane.
# x' axis.
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# Arrow head pointing in the positive x' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.7)
ax.text(x_max, z_max, y_max - .5, "x'", 'x', color='b')
# y' axis.
x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# Arrow head pointing in the positive y' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.9, color='g')
ax.text(x_max - .5, z_max, y_max, "y'", 'y', color='g')
# # z' axis.
# x_min, y_min, z_min = inv_trans_eqs(0., 0, min_Z, theta, incl)
# x_max, y_max, z_max = inv_trans_eqs(0., 0, max_Z, theta, incl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='y')
# # Arrow head pointing in the positive z' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
# arrow_length_ratio=.9, color='y')
# ax.text(x_max - .5, z_max, y_max, "z'", 'z', color='y')
ax.set_xlabel('x (Kpc)')
ax.set_ylabel('z (Kpc)')
ax.set_ylim(max_Y, min_Y)
ax.set_zlabel('y (Kpc)')
plt.colorbar(SC, shrink=0.9, aspect=25)
ax.axis('equal')
ax.axis('tight')
# This controls the initial orientation of the displayed 3D plot.
# ‘elev’ stores the elevation angle in the z plane. ‘azim’ stores the
# azimuth angle in the x,y plane.
ax.view_init(elev=0., azim=-90.)
plt.show()
# plt.savefig()
def main():
"""
"""
# Define inclination angles (i, Theta) (SMC first, LMC second).
# 'Theta' is the PA (position angle) measured from the North (positive
# y axis in van der Marel et al. 2002, Fig 3)
rot_angles = ((60, 150.), (30, 140.))
# Root path.
r_path = os.path.realpath(__file__)[:-30]
# Read input data for both galaxies from file (smc_data, lmc_data)
gal_data = readData.main(r_path)
for gal, gal_name in enumerate(['SMC', 'LMC']):
print("Generating data for {}".format(gal_name))
incl, Theta = rot_angles[gal]
# 'theta' is the position angle measured from the West (positive
# x axis), used by Eq (7) in van der Marel & Cioni (2001).
theta = Theta + 90.
# Center coordinates and distance for this galaxy.
gal_center, D_0, e_gal_dist = MCs_data.MCs_data(gal)
ra_0, dec_0 = gal_center.ra, gal_center.dec
# Center coordinates for observed clusters in this galaxy.
ra, dec = gal_data[gal]['ra'], gal_data[gal]['dec']
# Generate N random vertical distances (z'), in parsec.
z_prime = zDist(len(ra))
# Distance to clusters in parsecs.
D = invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime)
# Convert to distance moduli.
dist_mod = np.round(-5. + 5. * np.log10(D), 2)
# This line below uses the actual distance moduli found by ASteCA.
# dist_mod = gal_data[gal]['dist_mod']
# Random errors for distance moduli.
e_dm = np.round(np.random.uniform(.03, .09, len(ra)), 2)
# Store data in output file.
outData(gal_name, gal_data[gal], dist_mod, e_dm)
print("Output data stored")
# Obtain angular projected distance and position angle for the
# clusters in the galaxy.
rho, phi = rho_phi(ra, dec, gal_center)
cl_xyz = xyz_coords(rho, phi, D_0, dist_mod)
make_plot(gal_name, incl, theta, cl_xyz, dist_mod)
print("Plot saved.")
if __name__ == '__main__':
main()
| gpl-3.0 |
annahs/atmos_research | LEO_calc_coating_from_meas_scat_amp_and_write_to_db.py | 1 | 3857 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
c2 = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = '20110105'
end_date = '20120601'
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2-nc(2p26,1p26).lupckl'
rBC_density = 1.8
incand_sat = 3750
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
c.execute('''SELECT * FROM SP2_coating_analysis''')
names = [description[0] for description in c.description]
pprint(names)
begin_data = calendar.timegm(datetime.strptime(start_date,'%Y%m%d').timetuple())
end_data = calendar.timegm(datetime.strptime(end_date,'%Y%m%d').timetuple())
def get_rBC_mass(incand_pk_ht, year):
if year == 2012:
rBC_mass = 0.003043*incand_pk_ht + 0.24826 #AD corrected linear calibration for UBCSP2 at WHI 2012
if year == 2010:
rBC_mass = 0.01081*incand_pk_ht - 0.32619 #AD corrected linear calibration for ECSP2 at WHI 2010
return rBC_mass
def get_coating_thickness(BC_VED,scat_amp,coating_lookup_table):
#get the coating thicknesses from the lookup table which is a dictionary of dictionaries, the 1st keyed with BC core size and the second being coating thicknesses keyed with calc scat amps
core_diameters = sorted(coating_lookup_table.keys())
prev_diameter = core_diameters[0]
for core_diameter in core_diameters:
if core_diameter > BC_VED:
core_dia_to_use = prev_diameter
break
prev_diameter = core_diameter
#now get the coating thickness for the scat_amp this is the coating thickness based on the raw scattering max
scattering_amps = sorted(coating_lookup_table[core_dia_to_use].keys())
prev_amp = scattering_amps[0]
for scattering_amp in scattering_amps:
if scat_amp < scattering_amp:
scat_amp_to_use = prev_amp
break
prev_amp = scattering_amp
scat_coating_thickness = coating_lookup_table[core_dia_to_use].get(scat_amp_to_use, np.nan) # returns value for the key, or none
return scat_coating_thickness
LOG_EVERY_N = 10000
i = 0
for row in c.execute('''SELECT incand_amp, LF_scat_amp, unix_ts_utc, sp2b_file, file_index, instr FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and incand_amp<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle,incand_sat,begin_data,end_data)):
incand_amp = row[0]
LF_amp = row[1]
event_time = datetime.utcfromtimestamp(row[2])
file = row[3]
index = row[4]
instrt = row[5]
rBC_mass = get_rBC_mass(incand_amp, event_time.year)
if rBC_mass >= 0.25:
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
coat_th = get_coating_thickness(rBC_VED,LF_amp,lookup_table)
else:
rBC_VED = None
coat_th = None
c2.execute('''UPDATE SP2_coating_analysis SET coat_thickness_from_actual_scat_amp=? WHERE sp2b_file=? and file_index=? and instr=?''', (coat_th, file,index,instrt))
i+=1
if (i % LOG_EVERY_N) == 0:
print 'record: ', i
conn.commit()
conn.close()
| mit |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_values_to_be_in_type_list.py | 1 | 17690 | import logging
from typing import Dict, Optional
import numpy as np
import pandas as pd
from great_expectations.core import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.core.expect_column_values_to_be_of_type import (
_get_dialect_type_module,
_native_type_type_map,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.registry import get_metric_kwargs
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from great_expectations.validator.validation_graph import MetricConfiguration
logger = logging.getLogger(__name__)
try:
import pyspark.sql.types as sparktypes
except ImportError as e:
logger.debug(str(e))
logger.debug(
"Unable to load spark context; install optional spark dependency for support."
)
class ExpectColumnValuesToBeInTypeList(ColumnMapExpectation):
"""
Expect a column to contain values from a specified type list.
expect_column_values_to_be_in_type_list is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>` for typed-column backends,
    and also for PandasDataset where the column dtype provides an unambiguous constraint (any dtype except
'object'). For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type_list (str): \
A list of strings representing the data type that each column should have as entries. Valid types are
defined by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_of_type \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
map_metric = "column_values.in_type_list"
success_keys = (
"type_list",
"mostly",
)
default_kwarg_values = {
"type_list": None,
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
super().validate_configuration(configuration)
try:
assert "type_list" in configuration.kwargs, "type_list is required"
assert (
isinstance(configuration.kwargs["type_list"], (list, dict))
or configuration.kwargs["type_list"] is None
), "type_list must be a list or None"
if isinstance(configuration.kwargs["type_list"], dict):
assert (
"$PARAMETER" in configuration.kwargs["type_list"]
), 'Evaluation Parameter dict for type_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "type_list", "mostly", "row_condition", "condition_parser"],
)
if params["type_list"] is not None:
for i, v in enumerate(params["type_list"]):
params["v__" + str(i)] = v
values_string = " ".join(
["$v__" + str(i) for i, v in enumerate(params["type_list"])]
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
if include_column_name:
template_str = (
"$column value types must belong to this set: "
+ values_string
+ ", at least $mostly_pct % of the time."
)
else:
template_str = (
"value types must belong to this set: "
+ values_string
+ ", at least $mostly_pct % of the time."
)
else:
if include_column_name:
template_str = (
"$column value types must belong to this set: "
+ values_string
+ "."
)
else:
template_str = (
"value types must belong to this set: " + values_string + "."
)
else:
if include_column_name:
template_str = "$column value types may be any value, but observed value will be reported"
else:
template_str = (
"value types may be any value, but observed value will be reported"
)
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate_pandas(
self,
actual_column_type,
expected_types_list,
):
if expected_types_list is None:
success = True
else:
comp_types = []
for type_ in expected_types_list:
try:
comp_types.append(np.dtype(type_).type)
comp_types.append(np.dtype(type_))
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = _native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
success = actual_column_type in comp_types
return {
"success": success,
"result": {"observed_value": actual_column_type.type.__name__},
}
def _validate_sqlalchemy(
self, actual_column_type, expected_types_list, execution_engine
):
# Our goal is to be as explicit as possible. We will match the dialect
# if that is possible. If there is no dialect available, we *will*
# match against a top-level SqlAlchemy type.
#
# This is intended to be a conservative approach.
#
# In particular, we *exclude* types that would be valid under an ORM
# such as "float" for postgresql with this approach
if expected_types_list is None:
success = True
else:
types = []
type_module = _get_dialect_type_module(execution_engine=execution_engine)
for type_ in expected_types_list:
try:
type_class = getattr(type_module, type_)
types.append(type_class)
except AttributeError:
logger.debug("Unrecognized type: %s" % type_)
if len(types) == 0:
logger.warning(
"No recognized sqlalchemy types in type_list for current dialect."
)
types = tuple(types)
success = isinstance(actual_column_type, types)
return {
"success": success,
"result": {"observed_value": type(actual_column_type).__name__},
}
def _validate_spark(
self,
actual_column_type,
expected_types_list,
):
if expected_types_list is None:
success = True
else:
types = []
for type_ in expected_types_list:
try:
type_class = getattr(sparktypes, type_)
types.append(type_class)
except AttributeError:
logger.debug("Unrecognized type: %s" % type_)
if len(types) == 0:
raise ValueError("No recognized spark types in expected_types_list")
types = tuple(types)
success = isinstance(actual_column_type, types)
return {
"success": success,
"result": {"observed_value": type(actual_column_type).__name__},
}
def get_validation_dependencies(
self,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
# This calls TableExpectation.get_validation_dependencies to set baseline dependencies for the aggregate version
# of the expectation.
# We need to keep this as super(ColumnMapExpectation, self), which calls
# TableExpectation.get_validation_dependencies instead of ColumnMapExpectation.get_validation_dependencies.
# This is because the map version of this expectation is only supported for Pandas, so we want the aggregate
# version for the other backends.
dependencies = super(ColumnMapExpectation, self).get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
# Only PandasExecutionEngine supports the column map version of the expectation.
if isinstance(execution_engine, PandasExecutionEngine):
column_name = configuration.kwargs.get("column")
expected_types_list = configuration.kwargs.get("type_list")
metric_kwargs = get_metric_kwargs(
configuration=configuration,
metric_name="table.column_types",
runtime_configuration=runtime_configuration,
)
metric_domain_kwargs = metric_kwargs.get("metric_domain_kwargs")
metric_value_kwargs = metric_kwargs.get("metric_value_kwargs")
table_column_types_configuration = MetricConfiguration(
"table.column_types",
metric_domain_kwargs=metric_domain_kwargs,
metric_value_kwargs=metric_value_kwargs,
)
actual_column_types_list = execution_engine.resolve_metrics(
[table_column_types_configuration]
)[table_column_types_configuration.id]
actual_column_type = [
type_dict["type"]
for type_dict in actual_column_types_list
if type_dict["name"] == column_name
][0]
# only use column map version if column dtype is object
if (
actual_column_type.type.__name__ == "object_"
and expected_types_list is not None
):
# this resets dependencies using ColumnMapExpectation.get_validation_dependencies
dependencies = super().get_validation_dependencies(
configuration, execution_engine, runtime_configuration
)
# this adds table.column_types dependency for both aggregate and map versions of expectation
column_types_metric_kwargs = get_metric_kwargs(
metric_name="table.column_types",
configuration=configuration,
runtime_configuration=runtime_configuration,
)
dependencies["metrics"]["table.column_types"] = MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs=column_types_metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=column_types_metric_kwargs["metric_value_kwargs"],
)
return dependencies
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
column_name = configuration.kwargs.get("column")
expected_types_list = configuration.kwargs.get("type_list")
actual_column_types_list = metrics.get("table.column_types")
actual_column_type = [
type_dict["type"]
for type_dict in actual_column_types_list
if type_dict["name"] == column_name
][0]
if isinstance(execution_engine, PandasExecutionEngine):
# only PandasExecutionEngine supports map version of expectation and
# only when column type is object
if (
actual_column_type.type.__name__ == "object_"
and expected_types_list is not None
):
# this calls ColumnMapMetric._validate
return super()._validate(
configuration, metrics, runtime_configuration, execution_engine
)
return self._validate_pandas(
actual_column_type=actual_column_type,
expected_types_list=expected_types_list,
)
elif isinstance(execution_engine, SqlAlchemyExecutionEngine):
return self._validate_sqlalchemy(
actual_column_type=actual_column_type,
expected_types_list=expected_types_list,
execution_engine=execution_engine,
)
elif isinstance(execution_engine, SparkDFExecutionEngine):
return self._validate_spark(
actual_column_type=actual_column_type,
expected_types_list=expected_types_list,
)
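# Usage sketch (not part of this module): the expectation defined above is
# normally invoked through a validator / dataset object rather than
# instantiated directly, e.g. (v2-style Dataset API; names are assumptions):
#
#     import great_expectations as ge
#     df = ge.dataset.PandasDataset({"a": [1, 2, 3]})
#     result = df.expect_column_values_to_be_in_type_list("a", ["int64", "int"])
#     assert result.success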
| apache-2.0 |
bongtrop/peach | tutorial/neural-networks/linear-prediction.py | 6 | 3386 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/linear-prediction.py
# Using neural networks to predict number sequences
################################################################################
# A neural network can be used to predict future values of a sequence of
# numbers. Wold's Decomposition Theorem stablishes that any sequence can be
# split in a regular and predictable part and an innovation process (which is
# discrete white noise, and thus impredictable). The goal of this tutorial is
# to show how to use the neural network implementation of Peach to do this.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import random
import peach as p
# First, we create the network, with only one layer with only one neuron in it.
# The neuron has many inputs and only one output. The activation function is the
# identity. This kind of neuron is usually known as ADALINE (Adaptive Linear
# Neuron, later Adaptive Linear Element). We use as learning algorithm the LMS
# algorithm.
N = 32
nn = p.FeedForward((N, 1), phi=p.Identity, lrule=p.LMS(0.05))
# The lists below will track the values of the sequence being predicted and of
# the error for plotting.
xlog = [ ]
ylog = [ ]
elog = [ ]
error = 1.
i = 0
x = zeros((N, 1), dtype=float) # Input is a column-vector.
while i < 2000 and error > 1.e-10:
    # The sequence we will predict is the one generated by a cosine. The next
    # value of the function is the desired output of the neuron. The neuron will
    # use past values to predict the unknown value. To spice things up, we add
    # some Gaussian noise (which might actually help the convergence).
d = cos(2.*pi/128. * i) + random.gauss(0., 0.01)
# Here, we activate the network to calculate the prediction.
y = nn(x)[0, 0] # Notice that we need to access the output
error = abs(d - y) # as a vector, since that's how the NN work.
nn.learn(x, d)
# We store the results to plot later.
xlog.append(d)
ylog.append(y)
elog.append(error)
# Here, we apply a delay in the sequence by shifting every value one
# position back. We are using N (=32) samples to make the prediction, but
# the code here makes no distinction and could be used with any number of
# coefficients in the prediction. The last value of the sequence is put in
# the [0] position of the vector.
x[1:] = x[:-1]
x[0] = d
i = i + 1
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``linear-prediction.png``.
try:
import pylab
pylab.subplot(211)
pylab.hold(True)
pylab.grid(True)
pylab.plot(array(xlog), 'b--')
pylab.plot(array(ylog), 'g')
pylab.plot(array(elog), 'r:')
pylab.legend([ "$x$", "$y$", "$error$" ])
pylab.subplot(212)
pylab.grid(True)
pylab.stem(arange(0, N), reshape(nn[0].weights, (N,)), "k-", "ko", "k-")
pylab.xlim([0, N-1])
pylab.savefig("linear-prediction.png")
except ImportError:
print "After %d iterations:" % (len(elog),)
print nn[0].weights | lgpl-2.1 |
cs207-project/TimeSeries | procs/_corr.py | 1 | 4794 | import numpy.fft as nfft
import numpy as np
import timeseries as ts
from scipy.stats import norm
# import pyfftw
import sys
#sys.path.append("/Users/yuhantang/CS207/TimeSeries/procs")
from .interface import *
def createfromlist(l):
d = new_darray(len(l))
for i in range(0,len(l)):
darray_set(d,i,l[i])
return d
def tsmaker(m, s, j):
meta={}
meta['order'] = int(np.random.choice([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]))
meta['blarg'] = int(np.random.choice([1, 2]))
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, m, s) + j*np.random.randn(100)
return meta, ts.TimeSeries(t, v)
def random_ts(a):
t = np.arange(0.0, 1.0, 0.01)
v = a*np.random.random(100)
return ts.TimeSeries(t, v)
def stand(x, m, s):
return (x-m)/s
def ccor(ts1, ts2):
"given two standardized time series, compute their cross-correlation using FFT"
# Get the next 2 th power 110 -> 128
next_2 = int(2**np.ceil(np.log(len(ts1.values()))))
#
ts1_value = ts1.values()
ts2_value = ts2.values()
ts1_container,ts2_container = [],[]
ts1_zero_container = [0]*len(ts1.values())
ts2_zero_container = [0]*len(ts2.values())
ts1_c_array,ts2_c_array = [None]*(len(ts1.values())*2),[None]*(len(ts2.values())*2)
ts1_c_array[::2] = ts1_value
ts1_c_array[1::2] = ts1_zero_container
ts2_c_array[::2] = ts2_value
ts2_c_array[1::2] = ts2_zero_container
for i in range(len(ts1_c_array)+1,next_2*2):
ts1_c_array.append(np.double(0))
for i in range(len(ts2_c_array)+1,next_2*2):
ts2_c_array.append(np.double(0))
ts1_c_array.insert(0,0)
ts2_c_array.insert(0,0)
ts1_c_array = createfromlist(np.double(ts1_c_array))
ts2_c_array = createfromlist(np.double(ts2_c_array))
four1(ts1_c_array,next_2,1)
four1(ts2_c_array,next_2,1)
for i in range(len(ts2.values())*2+1):
ts1_container.append(darray_get(ts1_c_array,i))
for j in range(len(ts1.values())*2+1):
ts2_container.append(darray_get(ts2_c_array,j))
ts1_fft = np.asarray(ts1_container[1::2]) + 1j * np.asarray(ts1_container[2::2])
ts2_fft = np.asarray(ts2_container[1::2]) + 1j * np.asarray(ts2_container[2::2])
ts1_fft = ts1_fft[:len(ts1)+1]
ts2_fft = ts2_fft[:len(ts2)+1]
# ifft part
ts1_ts2_conj = ts1_fft * np.conj(ts2_fft)
ts1_ts2_ifft_container = [0]*len(ts1_ts2_conj)*2
ts1_ts2_ifft_container[::2] = ts1_ts2_conj.real
ts1_ts2_ifft_container[1::2] = ts1_ts2_conj.imag
for i in range(len(ts1_ts2_conj)+1, next_2 *2):
ts1_ts2_ifft_container.append(0)
ts1_ts2_ifft_container.insert(0,0)
ts1_ts2_ifft_container = createfromlist(ts1_ts2_ifft_container)
four1(ts1_ts2_ifft_container, next_2, -1)
ts1_ts2_ifft_container_python = []
for i in range(len(ts1_ts2_conj)*2+1):
ts1_ts2_ifft_container_python.append(darray_get(ts1_ts2_ifft_container,i))
ccor_value = np.asarray(ts1_ts2_ifft_container_python[1::2])
return 1/len(ts1) * ccor_value
def max_corr_at_phase(ts1, ts2):
ccorts = ccor(ts1, ts2)
idx = np.argmax(ccorts)
maxcorr = ccorts[idx]
return idx, maxcorr
#The equation for the kernelized cross correlation is given at
#http://www.cs.tufts.edu/~roni/PUB/ecml09-tskernels.pdf
#normalize the kernel there by np.sqrt(K(x,x)K(y,y)) so that the correlation
#of a time series with itself is 1.
def kernel_corr(ts1, ts2, mult=1):
"compute a kernelized correlation so that we can get a real distance"
#your code here.
cross_correlation = ccor(ts1, ts2) * mult
corr_ts1, corr_ts2 = ccor(ts1, ts1) * mult, ccor(ts2, ts2) * mult
return np.sum(np.exp(cross_correlation))/np.sqrt(np.sum(np.exp(corr_ts1))*np.sum(np.exp(corr_ts2)))
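# Hypothetical convenience helper (not in the original module): the kernel
# above is normalized so kernel_corr(x, x) == 1, which induces the distance
# d(x, y) = sqrt(k(x,x) + k(y,y) - 2*k(x,y)) = sqrt(2 - 2*kernel_corr(x, y)).
def kernel_dist(ts1, ts2, mult=1):
    return np.sqrt(max(0.0, 2.0 - 2.0 * kernel_corr(ts1, ts2, mult=mult)))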
#this is for a quick and dirty test of these functions
#you might need to add procs to pythonpath for this to work
if __name__ == "__main__":
print("HI")
_, t1 = tsmaker(0.5, 0.1, 0.01)
_, t2 = tsmaker(0.5, 0.1, 0.01)
print(t1.mean(), t1.std(), t2.mean(), t2.std())
import matplotlib.pyplot as plt
plt.plot(t1)
plt.plot(t2)
plt.show()
standts1 = stand(t1, t1.mean(), t1.std())
standts2 = stand(t2, t2.mean(), t2.std())
#print(type(standts1),'this is the type=================*********')
#assert 1 == 2
idx, mcorr = max_corr_at_phase(standts1, standts2)
print(idx, mcorr)
sumcorr = kernel_corr(standts1, standts2, mult=10)
print(sumcorr)
t3 = random_ts(2)
t4 = random_ts(3)
plt.plot(t3)
plt.plot(t4)
plt.show()
standts3 = stand(t3, t3.mean(), t3.std())
standts4 = stand(t4, t4.mean(), t4.std())
idx, mcorr = max_corr_at_phase(standts3, standts4)
print(idx, mcorr)
sumcorr = kernel_corr(standts3, standts4, mult=10)
print(sumcorr)
| mit |
benjaminoh1/tensorflowcookbook | Chapter 07/bag_of_words.py | 1 | 6082 | # Working with Bag of Words
#---------------------------------------
#
# In this example, we will download and preprocess the ham/spam
# text data. We will then use a one-hot-encoding to make a
# bag of words set of features to use in logistic regression.
#
# We will use these one-hot-vectors for logistic regression to
# predict if a text is spam or ham.
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
import csv
import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Check if data was downloaded, otherwise download it and save for future use
save_file_name = os.path.join('temp','temp_spam_data.csv')
if os.path.isfile(save_file_name):
text_data = []
with open(save_file_name, 'r') as temp_output_file:
reader = csv.reader(temp_output_file)
for row in reader:
text_data.append(row)
else:
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x)>=1]
# And write to csv
with open(save_file_name, 'w') as temp_output_file:
writer = csv.writer(temp_output_file)
writer.writerows(text_data)
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Relabel 'spam' as 1, 'ham' as 0
target = [1 if x=='spam' else 0 for x in target]
# Normalize text
# Lower case
texts = [x.lower() for x in texts]
# Remove punctuation
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
# Remove numbers
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
# Trim extra whitespace
texts = [' '.join(x.split()) for x in texts]
# Plot histogram of text lengths
text_lengths = [len(x.split()) for x in texts]
text_lengths = [x for x in text_lengths if x < 50]
plt.hist(text_lengths, bins=25)
plt.title('Histogram of # of Words in Texts')
# Choose max text word length at 25
sentence_size = 25
min_word_freq = 3
# Setup vocabulary processor
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
# Have to fit transform to get length of unique words.
vocab_processor.fit_transform(texts)
embedding_size = len(vocab_processor.vocabulary_)
# Split up data set into train/test
train_indices = np.random.choice(len(texts), round(len(texts)*0.8), replace=False)
test_indices = np.array(list(set(range(len(texts))) - set(train_indices)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)
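# Aside (illustration only): looking up rows of an identity matrix and summing
# them is just a one-hot bag-of-words count. With a vocabulary of size 5 and a
# sentence encoded as [2, 0, 2]:
#   np.eye(5)[[2, 0, 2]].sum(axis=0) -> [1., 0., 2., 0., 0.]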
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(model_output, y_target))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Intitialize Variables
init = tf.initialize_all_variables()
sess.run(init)
# Start Logistic Regression
print('Starting Training Over {} Sentences.'.format(len(texts_train)))
loss_vec = []
train_acc_all = []
train_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_train)):
y_data = [[target_train[ix]]]
sess.run(train_step, feed_dict={x_data: t, y_target: y_data})
temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data})
loss_vec.append(temp_loss)
if (ix+1)%10==0:
print('Training Observation #' + str(ix+1) + ': Loss = ' + str(temp_loss))
# Keep trailing average of past 50 observations accuracy
# Get prediction of single observation
[[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
# Get True/False if prediction is accurate
train_acc_temp = target_train[ix]==np.round(temp_pred)
train_acc_all.append(train_acc_temp)
if len(train_acc_all) >= 50:
train_acc_avg.append(np.mean(train_acc_all[-50:]))
# Get test set accuracy
print('Getting Test Set Accuracy For {} Sentences.'.format(len(texts_test)))
test_acc_all = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_test)):
y_data = [[target_test[ix]]]
if (ix+1)%50==0:
print('Test Observation #' + str(ix+1))
# Keep trailing average of past 50 observations accuracy
# Get prediction of single observation
[[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
# Get True/False if prediction is accurate
test_acc_temp = target_test[ix]==np.round(temp_pred)
test_acc_all.append(test_acc_temp)
print('\nOverall Test Accuracy: {}'.format(np.mean(test_acc_all)))
# Plot training accuracy over time
plt.plot(range(len(train_acc_avg)), train_acc_avg, 'k-', label='Train Accuracy')
plt.title('Avg Training Acc Over Past 50 Generations')
plt.xlabel('Generation')
plt.ylabel('Training Accuracy')
plt.show() | mit |
dvro/scikit-protopy | protopy/base.py | 1 | 4528 | """Base and mixin classes for instance reduction techniques"""
# Author: Dayvid Victor <dvro@cin.ufpe.br>
# License: BSD Style
import warnings
from abc import ABCMeta, abstractmethod
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.utils import check_array
from sklearn.externals import six
class InstanceReductionWarning(UserWarning):
pass
# Make sure that InstanceReductionWarning is always displayed
warnings.simplefilter("always", InstanceReductionWarning)
class InstanceReductionBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for instance reduction estimators."""
@abstractmethod
def __init__(self):
pass
class InstanceReductionMixin(InstanceReductionBase, ClassifierMixin):
"""Mixin class for all instance reduction techniques"""
    def set_classifier(self, classifier):
        """Sets the classifier to be used in the instance reduction process
        and in the subsequent classification.
        Parameters
        ----------
        classifier : classifier, following the KNeighborsClassifier style
            (default = KNN)
        """
        self.classifier = classifier
def reduce_data(self, X, y):
"""Perform the instance reduction procedure on the given training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training set.
y : array-like, shape = [n_samples]
Labels for X.
Returns
-------
        X_ : array-like, shape = [indeterminate, n_features]
            Resulting training set.
        y_ : array-like, shape = [indeterminate]
            Labels for X_
"""
pass
def fit(self, X, y, reduce_data=True):
"""
Fit the InstanceReduction model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
reduce_data : bool, flag indicating if the reduction would be performed
"""
self.X = X
self.y = y
if reduce_data:
self.reduce_data(X, y)
return self
def predict(self, X, n_neighbors=1):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
The default prediction uses KNeighborsClassifier. If the instance
reduction algorithm is to be performed with another classifier,
this method should be explicitly overridden and documented.
"""
X = check_array(X)
if not hasattr(self, "X_") or self.X_ is None:
raise AttributeError("Model has not been trained yet.")
if not hasattr(self, "y_") or self.y_ is None:
raise AttributeError("Model has not been trained yet.")
if self.classifier is None:
self.classifier = KNeighborsClassifier(n_neighbors=n_neighbors)
self.classifier.fit(self.X_, self.y_)
return self.classifier.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test data X.
after a given prototype selection algorithm.
Parameters
----------
X : array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
self.classifier.fit(self.X_, self.y_)
return self.classifier.predict_proba(X)
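# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a minimal
# concrete selector showing how the mixin contract above is meant to be used.
# The name _ClassMeanSelector and its keep-one-mean-per-class strategy are
# assumptions for demonstration only, not an algorithm shipped by this package.
# ---------------------------------------------------------------------------
import numpy as np

class _ClassMeanSelector(InstanceReductionMixin):
    """Toy reducer: keeps a single prototype (the class mean) per class."""
    def __init__(self, n_neighbors=1):
        # Classifier used by InstanceReductionMixin.predict on the reduced set.
        self.classifier = KNeighborsClassifier(n_neighbors=n_neighbors)
    def reduce_data(self, X, y):
        X, y = np.asarray(X, dtype=float), np.asarray(y)
        classes = np.unique(y)
        # One prototype per class: the mean of that class's samples.
        self.X_ = np.array([X[y == c].mean(axis=0) for c in classes])
        self.y_ = classes
        self.reduction_ = 1.0 - float(len(self.y_)) / len(y)
        return self.X_, self.y_

# Hypothetical usage: sel = _ClassMeanSelector().fit(X_train, y_train)
# y_pred = sel.predict(X_test)  # 1-NN over the retained prototypes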
| bsd-2-clause |
ZenDevelopmentSystems/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 68 | 43439 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overflow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
SP2RC-Coding-Club/Codes | 13_07_2017/3D_slab_modes.py | 1 | 35096 |
#import pdb # pause code for debugging at pdb.set_trace()
import numpy as np
import toolbox as tool
import slab_functions as sf
from pysac.plot.mayavi_seed_streamlines import SeedStreamline
import matplotlib.pyplot as plt
from mayavi import mlab
import gc
#import move_seed_points as msp
import mayavi_plotting_functions as mpf
import dispersion_diagram
import img2vid as i2v
from functools import partial
import os
# ================================
# Preamble: set mode options and view parameters
# ================================
# What mode do you want? OPTIONS:
mode_options = ['slow-kink-surf', 'slow-saus-surf', 'slow-saus-body-3',
'slow-kink-body-3', 'slow-saus-body-2', 'slow-kink-body-2',
'slow-saus-body-1', 'slow-kink-body-1', 'fast-saus-body-1',
'fast-kink-body-1', 'fast-saus-body-2', 'fast-kink-body-2',
'fast-saus-body-3', 'fast-kink-body-3', 'fast-kink-surf',
'fast-saus-surf', 'shear-alfven', 'shear-alfven-broadband']
# Which angle shall we view from? OPTIONS:
view_options = ['front', 'front-parallel', 'top', 'top-parallel', 'front-top',
'front-side', 'front-top-side']
# Uniform lighting?
#uniform_light = True
uniform_light = False
show_density = False
show_density_pert = False
show_mag = False
show_mag_scale = False
show_mag_fade = False
show_mag_vec = False
show_vel_front = False
show_vel_front_pert = False
show_vel_top = False
show_vel_top_pert = False
show_disp_top = False
show_disp_front = False
show_axes = False
show_axis_labels = False
show_mini_axis = False
show_boundary = False
# Uncomment the parameter you would like to see
# No density perturbations or vel/disp pert for alfven modes.
#show_density = True
#show_density_pert = True
show_mag = True
#show_mag_scale = True #must also have show_mag = True
#show_mag_fade = True
#show_mag_vec = True
#show_vel_front = True
#show_vel_front_pert = True
#show_vel_top = True
#show_vel_top_pert = True
#show_disp_top = True
#show_disp_front = True
show_axes = True
#show_axis_labels = True
show_mini_axis = True
show_boundary = True
# Visualisation modules in string form for file-names
vis_modules = [show_density, show_density_pert, show_mag, show_mag_scale,
show_mag_fade, show_mag_vec, show_vel_front, show_vel_front_pert,
show_vel_top, show_vel_top_pert, show_disp_top, show_disp_front]
vis_modules_strings = ['show_density', 'show_density_pert', 'show_mag', 'show_mag_scale',
'show_mag_fade', 'show_mag_vec', 'show_vel_front', 'show_vel_front_pert',
'show_vel_top', 'show_vel_top_pert', 'show_disp_top', 'show_disp_front']
vis_mod_string = ''
for i, j in enumerate(vis_modules):
if vis_modules[i]:
vis_mod_string = vis_mod_string + vis_modules_strings[i][5:] + '_'
# Set to True if you would like the dispersion diagram with chosen mode highlighted.
show_dispersion = False
#show_dispersion = True
# Wanna see the animation? Of course you do
#show_animation = False
show_animation = True
# Basic plot to see which eigensolutions have been found.
show_quick_plot = False
#show_quick_plot = True
# Video resolution
#res = (1920,1080) # There is a problem with this resolution- height must be odd number - Mayavi bug apparently
res = tuple(101 * np.array((16,9)))
#res = tuple(51 * np.array((16,9)))
#res = tuple(21 * np.array((16,9)))
number_of_frames = 1
# Frames per second of output video
fps = 20
#save_images = False
save_images = True
make_video = False
#make_video = True
# Where should I save the animation images/videos?
os.path.abspath(os.curdir)
os.chdir('..')
save_directory = os.path.join(os.path.abspath(os.curdir), '3D_vis_animations')
# Where should I save the dispersion diagrams?
save_dispersion_diagram_directory = os.path.join(os.path.abspath(os.curdir), '3D_vis_dispersion_diagrams')
# ================================
# Visualisation set-up
# ================================
# Variable definitions (for reference):
# x = k*x
# y = k*y
# z = k*z
# W = omega/k
# K = k*x_0
# t = omega*t
# Loop through selected modes
for mode_ind in [0]:#range(8,14): # for all others. REMEMBER SBB parameters
#for mode_ind in [14,15]: #for fast body surf. REMEMBER SBS parameters
#for mode_ind in [16, 17]:
#for mode_ind in [13]: #for an individual mode
#for mode_ind in range(2,14):
if mode_ind not in range(len(mode_options)):
raise NameError('Mode not in mode_options')
# (note that fast surface modes, i.e. 14 and 15, can only be
# found with SBS parameters in slab_functions...)
mode = mode_options[mode_ind]
# Specify oscillation parameters
if 'slow' in mode and 'surf' in mode or 'alfven' in mode:
K = 2.
elif 'slow' in mode and 'body' in mode:
K = 8.
elif 'fast' in mode and 'body-1' in mode:
K = 8.
elif 'fast' in mode and 'body-2' in mode:
K = 15.
elif 'fast' in mode and 'body-3' in mode:
K = 22.
elif 'fast' in mode and 'surf' in mode:
K = 8.
else:
raise NameError('Mode not found')
# Specify density ratio R1 := rho_1 / rho_0
# R1 = 1.5 # Higher denisty on left than right
# R1 = 1.8
# R1 = 1.9 # Disp_diagram will only work for R1=1.5, 1.8, 2.0
R1 = 2. # Symmetric slab
# Reduce number of variables in dispersion relation
disp_rel_partial = partial(sf.disp_rel_asym, R1=R1)
# find eigenfrequencies W (= omega/k) within the range Wrange for the given parameters.
Wrange1 = np.linspace(0., sf.cT, 11)
Wrange2 = np.linspace(sf.cT, sf.c0, 401)
Wrange3 = np.linspace(sf.c0, sf.c2, 11)
Woptions_slow_surf = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange1, args=None).transpose())
Woptions_slow_body = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange2, args=None).transpose())
Woptions_fast = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange3, args=None).transpose())
# Remove W values that are very close to characteristic speeds - these are spurious solutions
tol = 1e-2
indices_to_rm = []
for i, w in enumerate(Woptions_slow_surf):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < 0 or w > sf.cT:
indices_to_rm.append(i)
Woptions_slow_surf = np.delete(Woptions_slow_surf, indices_to_rm)
Woptions_slow_surf.sort()
indices_to_rm = []
for i, w in enumerate(Woptions_slow_body):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < sf.cT or w > sf.c0:
indices_to_rm.append(i)
Woptions_slow_body = np.delete(Woptions_slow_body, indices_to_rm)
Woptions_slow_body.sort()
indices_to_rm = []
for i, w in enumerate(Woptions_fast):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < sf.c0 or w > min(sf.c1, sf.c2):
indices_to_rm.append(i)
Woptions_fast = np.delete(Woptions_fast, indices_to_rm)
Woptions_fast.sort()
# remove any higher order slow body modes - we only want to do the first 3 saus/kink
if len(Woptions_slow_body) > 6:
Woptions_slow_body = np.delete(Woptions_slow_body, range(len(Woptions_slow_body) - 6))
Woptions = np.concatenate((Woptions_slow_surf, Woptions_slow_body, Woptions_fast))
# set W to be the eigenfrequency for the requested mode
if 'fast-saus-body' in mode or 'fast-kink-surf' in mode:
W = Woptions_fast[-2]
elif 'fast-kink-body' in mode or 'fast-saus-surf' in mode:
W = Woptions_fast[-1]
elif 'slow' in mode and 'surf' in mode:
W = Woptions_slow_surf[mode_ind]
elif 'slow' in mode and 'body' in mode:
W = Woptions_slow_body[mode_ind-2]
if 'alfven' in mode:
W = sf.vA
else:
W = np.real(W)
# Quick plot to see if we are hitting correct mode
if show_quick_plot:
plt.plot([K] * len(Woptions), Woptions, '.')
plt.plot(K+0.5, W, 'go')
plt.xlim([0,23])
plt.show()
# ================================
# Dispersion diagram
# ================================
if show_dispersion:
if 'alfven' in mode:
raise NameError("Dispersion plot requested for an alfven mode. Can't do that.")
dispersion_diagram.dispersion_diagram(mode_options, mode,
disp_rel_partial, K, W, R1)
# plt.tight_layout() # seems to make it chop the sides off with this
plt.savefig(os.path.join(save_dispersion_diagram_directory, 'R1_' + str(R1) + '_' + mode + '.png') )
plt.close()
# ================================
# Animation
# ================================
if show_animation:
print('Starting ' + mode)
# set grid parameters
xmin = -2.*K
xmax = 2.*K
ymin = 0.
ymax = 4.
zmin = 0.
zmax = 2*np.pi
# You can change ny but be careful changing nx, nz.
nx = 300#100 #100 #300 gives us reduced bouncing of field lines for the same video size, but there is significant computational cost.
ny = 300#100 #100 #100#20 #100
nz = 300#100 #100
nt = number_of_frames
if nz % nt != 0:
print("nt doesnt divide nz so there may be a problem with chopping in z direction for each time step")
t_start = 0.
t_end = zmax
t = t_start
xvals = np.linspace(xmin, xmax, nx)
yvals = np.linspace(ymin, ymax, ny)
zvals = np.linspace(zmin, zmax, nz, endpoint=False) # A fudge to give the height as exactly one wavelength
x_spacing = max(nx, ny, nz) / nx
y_spacing = max(nx, ny, nz) / ny
z_spacing = max(nx, ny, nz) / nz
# For masking points for plotting vector fields- have to do it manually due to Mayavi bug
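# mod is the masking stride (one retained vector per ~4% of the x grid); mod_y
# rescales it by y_spacing so arrows stay visually evenly spaced when ny != nx.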
mod = int(4 * nx / 100)
mod_y = int(np.ceil(mod / y_spacing))
# Get the data xi=displacement, v=velocity, b=mag field
if show_disp_top or show_disp_front:
xixvals = np.real(np.repeat(sf.xix(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
xizvals = np.real(np.repeat(sf.xiz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
xiyvals = np.real(np.repeat(sf.xiy(mode, xvals, zvals, t, W, K)[:, :, np.newaxis], ny, axis=2))
if show_vel_front or show_vel_top:
vxvals = np.real(np.repeat(sf.vx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vzvals = np.real(np.repeat(sf.vz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vyvals = np.real(np.repeat(sf.vy(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
if show_vel_front_pert or show_vel_top_pert:
vxvals = np.real(np.repeat(sf.vx_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vzvals = np.real(np.repeat(sf.vz_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vyvals = np.zeros_like(vxvals)
# Axis is defined on the mag field so we have to set up this data
bxvals = np.real(np.repeat(sf.bx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
byvals = np.real(np.repeat(sf.by(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
bz_eq3d = np.repeat(sf.bz_eq(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2)
bzvals = np.real(np.repeat(-sf.bz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2) +
bz_eq3d)
# displacement at the right and left boundaries
if show_boundary:
xix_boundary_r_vals = np.real(np.repeat(K + sf.xix_boundary(mode, zvals, t, W, K, R1, boundary='r')[:, np.newaxis], ny, axis=1))
xix_boundary_l_vals = np.real(np.repeat(-K + sf.xix_boundary(mode, zvals, t, W, K, R1, boundary='l')[:, np.newaxis], ny, axis=1))
if show_density:
rho_vals = np.real(np.repeat(sf.rho(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
if show_density_pert:
rho_vals = np.real(np.repeat(sf.rho_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
if show_disp_top or show_disp_front:
xixvals_t = xixvals
xiyvals_t = xiyvals
xizvals_t = xizvals
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_t = vxvals
vyvals_t = vyvals
vzvals_t = vzvals
if show_boundary:
xix_boundary_r_vals_t = xix_boundary_r_vals
xix_boundary_l_vals_t = xix_boundary_l_vals
if show_density or show_density_pert:
rho_vals_t = rho_vals
# ================================
# Starting figure and visualisation modules
# ================================
zgrid_zy, ygrid_zy = np.mgrid[0:nz:(nz)*1j,
0:ny:(ny)*1j]
fig = mlab.figure(size=res) # (1920, 1080) for 1080p , tuple(101 * np.array((16,9))) #16:9 aspect ratio for video upload
# Spacing of grid so that we can display a visualisation cube without having the same number of grid points in each dimension
spacing = np.array([x_spacing, z_spacing, y_spacing])
if show_density or show_density_pert:
# Scalar field density
rho = mlab.pipeline.scalar_field(rho_vals_t, name="density", figure=fig)
rho.spacing = spacing
mpf.volume_red_blue(rho, rho_vals_t)
#Masking points
if show_mag_vec:
bxvals_mask_front_t, byvals_mask_front_t, bzvals_mask_front_t = mpf.mask_points(bxvals_t, byvals_t, bzvals_t,
'front', mod, mod_y)
if show_disp_top:
xixvals_mask_top_t, xiyvals_mask_top_t, xizvals_mask_top_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'top', mod, mod_y)
if show_disp_front:
xixvals_mask_front_t, xiyvals_mask_front_t, xizvals_mask_front_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'front', mod, mod_y)
if show_vel_top or show_vel_top_pert:
vxvals_mask_top_t, vyvals_mask_top_t, vzvals_mask_top_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'top', mod, mod_y)
if show_vel_front or show_vel_front_pert:
vxvals_mask_front_t, vyvals_mask_front_t, vzvals_mask_front_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'front', mod, mod_y)
xgrid, zgrid, ygrid = np.mgrid[0:nx:(nx)*1j,
0:nz:(nz)*1j,
0:ny:(ny)*1j]
field = mlab.pipeline.vector_field(bxvals_t, bzvals_t, byvals_t, name="B field",
figure=fig, scalars=zgrid)
field.spacing = spacing
if show_axes:
mpf.axes_no_label(field)
if show_mini_axis:
mpf.mini_axes()
if uniform_light:
#uniform lighting, but if we turn shading of volumes off, we are ok without
mpf.uniform_lighting(fig)
#Black background
mpf.background_colour(fig, (0., 0., 0.))
scalefactor = 8. * nx / 100. # scale factor for direction field vectors
# Set up visualisation modules
if show_mag_vec:
bdirfield_front = mlab.pipeline.vector_field(bxvals_mask_front_t, bzvals_mask_front_t,
byvals_mask_front_t, name="B field front",
figure=fig)
bdirfield_front.spacing = spacing
mpf.vector_cut_plane(bdirfield_front, 'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_vel_top or show_vel_top_pert:
vdirfield_top = mlab.pipeline.vector_field(vxvals_mask_top_t, np.zeros_like(vxvals_mask_top_t),
vyvals_mask_top_t, name="V field top",
figure=fig)
vdirfield_top.spacing = spacing
mpf.vector_cut_plane(vdirfield_top, 'top', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_vel_front or show_vel_front_pert:
vdirfield_front = mlab.pipeline.vector_field(vxvals_mask_front_t, vzvals_mask_front_t,
vyvals_mask_front_t, name="V field front",
figure=fig)
vdirfield_front.spacing = spacing
mpf.vector_cut_plane(vdirfield_front,'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_disp_top:
xidirfield_top = mlab.pipeline.vector_field(xixvals_mask_top_t, np.zeros_like(xixvals_mask_top_t),
xiyvals_mask_top_t, name="Xi field top",
figure=fig)
xidirfield_top.spacing = spacing
mpf.vector_cut_plane(xidirfield_top, 'top', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_disp_front:
xidirfield_front = mlab.pipeline.vector_field(xixvals_mask_front_t, xizvals_mask_front_t,
xiyvals_mask_front_t, name="Xi field front",
figure=fig)
xidirfield_front.spacing = spacing
mpf.vector_cut_plane(xidirfield_front, 'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
# Loop through time
for t_ind in range(nt):
if t_ind == 0:
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
if show_disp_top or show_disp_front:
xixvals_t = xixvals
xiyvals_t = xiyvals
xizvals_t = xizvals
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_t = vxvals
vyvals_t = vyvals
vzvals_t = vzvals
if show_boundary:
xix_boundary_r_vals_t = xix_boundary_r_vals
xix_boundary_l_vals_t = xix_boundary_l_vals
if show_density or show_density_pert:
rho_vals_t = rho_vals
else:
bxvals = np.real(np.repeat(sf.bx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
byvals = np.real(np.repeat(sf.by(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
bz_eq3d = np.repeat(sf.bz_eq(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2)
bzvals = np.real(np.repeat(-sf.bz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2) +
bz_eq3d)
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
# Update mag field data
field.mlab_source.set(u=bxvals_t, v=bzvals_t, w=byvals_t)
# Update mag field visualisation module
if show_mag_vec:
bxvals_mask_front_t, byvals_mask_front_t, bzvals_mask_front_t = mpf.mask_points(bxvals_t, byvals_t, bzvals_t,
'front', mod, mod_y)
bdirfield_front.mlab_source.set(u=bxvals_mask_front_t, v=bzvals_mask_front_t, w=byvals_mask_front_t)
# Update displacement field data
if show_disp_top or show_disp_front:
xixvals_split = np.split(xixvals, [nz - (nz / nt) * t_ind], axis=1)
xiyvals_split = np.split(xiyvals, [nz - (nz / nt) * t_ind], axis=1)
xizvals_split = np.split(xizvals, [nz - (nz / nt) * t_ind], axis=1)
xixvals_t = np.concatenate((xixvals_split[1], xixvals_split[0]), axis=1)
xiyvals_t = np.concatenate((xiyvals_split[1], xiyvals_split[0]), axis=1)
xizvals_t = np.concatenate((xizvals_split[1], xizvals_split[0]), axis=1)
# Update displacement field visualisation module
if show_disp_top:
xixvals_mask_top_t, xiyvals_mask_top_t, xizvals_mask_top_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'top', mod, mod_y)
xidirfield_top.mlab_source.set(u=xixvals_mask_top_t, v=np.zeros_like(xixvals_mask_top_t), w=xiyvals_mask_top_t)
if show_disp_front:
xixvals_mask_front_t, xiyvals_mask_front_t, xizvals_mask_front_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'front', mod, mod_y)
xidirfield_front.mlab_source.set(u=xixvals_mask_front_t, v=xizvals_mask_front_t, w=xiyvals_mask_front_t)
# Update velocity field data
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_split = np.split(vxvals, [nz - (nz / nt) * t_ind], axis=1)
vyvals_split = np.split(vyvals, [nz - (nz / nt) * t_ind], axis=1)
vzvals_split = np.split(vzvals, [nz - (nz / nt) * t_ind], axis=1)
vxvals_t = np.concatenate((vxvals_split[1], vxvals_split[0]), axis=1)
vyvals_t = np.concatenate((vyvals_split[1], vyvals_split[0]), axis=1)
vzvals_t = np.concatenate((vzvals_split[1], vzvals_split[0]), axis=1)
# Update velocity field visualisation module
if show_vel_top or show_vel_top_pert:
vxvals_mask_top_t, vyvals_mask_top_t, vzvals_mask_top_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'top', mod, mod_y)
vdirfield_top.mlab_source.set(u=vxvals_mask_top_t, v=np.zeros_like(vxvals_mask_top_t), w=vyvals_mask_top_t)
if show_vel_front or show_vel_front_pert:
vxvals_mask_front_t, vyvals_mask_front_t, vzvals_mask_front_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'front', mod, mod_y)
vdirfield_front.mlab_source.set(u=vxvals_mask_front_t, v=vzvals_mask_front_t, w=vyvals_mask_front_t)
# Update boundary displacement data
if show_boundary:
xix_boundary_r_vals_split = np.split(xix_boundary_r_vals, [nz - (nz / nt) * t_ind], axis=0)
xix_boundary_l_vals_split = np.split(xix_boundary_l_vals, [nz - (nz / nt) * t_ind], axis=0)
xix_boundary_r_vals_t = np.concatenate((xix_boundary_r_vals_split[1], xix_boundary_r_vals_split[0]), axis=0)
xix_boundary_l_vals_t = np.concatenate((xix_boundary_l_vals_split[1], xix_boundary_l_vals_split[0]), axis=0)
# Update density data
if show_density or show_density_pert:
rho_vals_split = np.split(rho_vals, [nz - (nz / nt) * t_ind], axis=1)
rho_vals_t = np.concatenate((rho_vals_split[1], rho_vals_split[0]), axis=1)
rho.mlab_source.set(scalars=rho_vals_t)
# Boundary data - Letting mayavi know where to plot the boundary
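# The min/max boundary positions are mapped from physical x in [xmin, xmax] to
# grid-index coordinates (scaled by x_spacing) so the mesh extents below line
# up with the vector-field grid.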
if show_boundary:
ext_min_r = ((nx) * (xix_boundary_r_vals_t.min() - xmin) / (xmax - xmin)) * x_spacing
ext_max_r = ((nx) * (xix_boundary_r_vals_t.max() - xmin) / (xmax - xmin)) * x_spacing
ext_min_l = ((nx) * (xix_boundary_l_vals_t.min() - xmin) / (xmax - xmin)) * x_spacing
ext_max_l = ((nx) * (xix_boundary_l_vals_t.max() - xmin) / (xmax - xmin)) * x_spacing
#Make field lines
if show_mag:
# move seed points up with phase speed. - Bit of a fudge.
# Create an array of points for which we want mag field seeds
nx_seed = 9
ny_seed = 13
start_x = 30. * nx / 100.
end_x = nx+1 - start_x
start_y = 1.
if ny == 20: # so that the lines don't go right up to the edge of the box
end_y = ny - 1.
elif ny == 100:
end_y = ny - 2.
elif ny == 300:
end_y = ny - 6.
else:
end_y = ny - 1
seeds=[]
dx_res = (end_x - start_x) / (nx_seed-1)
dy_res = (end_y - start_y) / (ny_seed-1)
for j in range(ny_seed):
for i in range(nx_seed):
x = start_x + (i * dx_res) * x_spacing
y = start_y + (j * dy_res) * y_spacing
z = 1. + (t_start + t_ind*(t_end - t_start)/nt)/zmax * nz
seeds.append((x,z,y))
if 'alfven' in mode:
for i in range(nx_seed):
del seeds[0]
del seeds[-1]
# Remove previous field lines - field lines cannot be updated, just the data that they are built from
if t_ind != 0:
field_lines.remove() # field_lines is defined in first go through loop
field_lines = SeedStreamline(seed_points=seeds)
# Field line visualisation tinkering
field_lines.stream_tracer.integration_direction='both'
field_lines.streamline_type = 'tube'
field_lines.stream_tracer.maximum_propagation = nz * 2
field_lines.tube_filter.number_of_sides = 20
field_lines.tube_filter.radius = 0.7 * max(nx, ny, nz) / 100.
field_lines.tube_filter.capping = True
field_lines.actor.property.opacity = 1.0
field.add_child(field_lines)
module_manager = field_lines.parent
# Colormap of magnetic field strength plotted on the field lines
if show_mag_scale:
module_manager.scalar_lut_manager.lut_mode = 'coolwarm'
module_manager.scalar_lut_manager.data_range=[7,18]
else:
mag_lut = module_manager.scalar_lut_manager.lut.table.to_array()
mag_lut[:,0] = [220]*256
mag_lut[:,1] = [20]*256
mag_lut[:,2] = [20]*256
module_manager.scalar_lut_manager.lut.table = mag_lut
if show_mag_fade:
mpf.colormap_fade(module_manager, fade_value=20)
# Which views do you want to show? Options are defined at the start
views_selected = [0]#[0,1,4,5,6] #range(7) #[2,3]
for view_ind, view_selected in enumerate(views_selected):
view = view_options[view_selected]
# Display boundary - cannot be updated each time
if show_boundary:
# Boundaries should look different depending on view
if view == 'front-parallel':
#remove previous boundaries
if t != 0 or view_ind != 0:
boundary_r.remove()
boundary_l.remove()
# Make a fading colormap by changing opacity at ends
lut = np.reshape(np.array([150, 150, 150, 255]*256), (256,4))
fade_value = 125
lut[:fade_value,-1] = np.linspace(0, 255, fade_value)
lut[-fade_value:,-1] = np.linspace(255, 0, fade_value)
# Set up boundary visualisation
boundary_r = mlab.mesh(xix_boundary_r_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_r, ext_max_r, 1, nz, 0, (ny-1) * y_spacing],
opacity=1., representation='wireframe',
line_width=12., scalars=zgrid_zy)
boundary_l = mlab.mesh(xix_boundary_l_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_l, ext_max_l, 1, nz, 0, (ny-1) * y_spacing],
opacity=1., representation='wireframe',
line_width=12., scalars=zgrid_zy)
# Boundary color and other options
boundary_r.module_manager.scalar_lut_manager.lut.table = lut
boundary_l.module_manager.scalar_lut_manager.lut.table = lut
boundary_r.actor.property.lighting = False
boundary_r.actor.property.shading = False
boundary_l.actor.property.lighting = False
boundary_l.actor.property.shading = False
else:
#remove previous boundaries
if t != 0 or view_ind != 0:
boundary_r.remove()
boundary_l.remove()
# Make a fading colormap by changing opacity at ends
lut = np.reshape(np.array([150, 150, 150, 255]*256), (256,4))
fade_value = 20
lut[:fade_value,-1] = np.linspace(0, 255, fade_value)
lut[-fade_value:,-1] = np.linspace(255, 0, fade_value)
# Set up boundary visualisation
boundary_r = mlab.mesh(xix_boundary_r_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_r, ext_max_r, 1, nz, 0, (ny-1) * y_spacing],
opacity=0.7, scalars=zgrid_zy)
boundary_l = mlab.mesh(xix_boundary_l_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_l, ext_max_l, 1, nz, 0, (ny-1) * y_spacing],
opacity=0.7, scalars=zgrid_zy)
# Boundary color and other options
boundary_r.module_manager.scalar_lut_manager.lut.table = lut
boundary_l.module_manager.scalar_lut_manager.lut.table = lut
boundary_r.actor.property.lighting = False
boundary_r.actor.property.shading = False
boundary_l.actor.property.lighting = False
boundary_l.actor.property.shading = False
# Set viewing angle - For some unknown reason we must redefine the camera position each time.
# This is something to do with the boundaries being replaced each time.
mpf.view_position(fig, view, nx, ny, nz)
if save_images:
prefix = 'R1_'+str(R1) + '_' + mode + '_' + vis_mod_string + view + '_'# + '_norho_'
mlab.savefig(os.path.join(save_directory, prefix + str(t_ind+1) + '.png'))
if t_ind == nt - 1:
if make_video:
i2v.image2video(filepath=save_directory, prefix=prefix,
output_name=prefix+'video', out_extension='mp4',
fps=fps, n_loops=4, delete_images=True,
delete_old_videos=True, res=res[1])
# Log: to keep us updated with progress
if t_ind % 5 == 4:
print('Finished frame number ' + str(t_ind + 1) + ' out of ' + str(number_of_frames))
#Release some memory after each time step
gc.collect()
#step t forward
t = t + (t_end - t_start) / nt
        # Close Mayavi window each time if we want to make a video
if make_video:
mlab.close(fig)
print('Finished ' + mode) | mit |
iShoto/testpy | codes/20200104_metric_learning_mnist/src/train_mnist_original_center.py | 1 | 5545 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd.function import Function
import torchvision
import os
import matplotlib.pyplot as plt
import argparse
from tqdm import trange
import numpy as np
from sklearn.metrics import classification_report
from losses import CenterLoss
from mnist_net import Net
import mnist_loader
# cf. https://cpp-learning.com/center-loss/
def main():
args = parse_args()
# Dataset
train_loader, test_loader, classes = mnist_loader.load_dataset(args.dataset_dir, img_show=True)
# Device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model
model = Net().to(device)
print(model)
# Loss
nllloss = nn.NLLLoss().to(device) # CrossEntropyLoss = log_softmax + NLLLoss
loss_weight = 1
centerloss = CenterLoss(10, 2).to(device)
# Optimizer
dnn_optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)
sheduler = lr_scheduler.StepLR(dnn_optimizer, 20, gamma=0.8)
center_optimizer = optim.SGD(centerloss.parameters(), lr =0.5)
print('Start training...')
for epoch in range(100):
# Update parameters.
epoch += 1
sheduler.step()
# Train and test a model.
train_acc, train_loss, feat, labels = train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer)
test_acc, test_loss = test(device, test_loader, model, nllloss, loss_weight, centerloss)
stdout_temp = 'Epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'
print(stdout_temp.format(epoch, train_acc, train_loss, test_acc, test_loss))
# Visualize features of each class.
vis_img_path = args.vis_img_path_temp.format(str(epoch).zfill(3))
visualize(feat.data.cpu().numpy(), labels.data.cpu().numpy(), epoch, vis_img_path)
# Save a trained model.
model_path = args.model_path_temp.format(str(epoch).zfill(3))
torch.save(model.state_dict(), model_path)
def train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer):
running_loss = 0.0
pred_list = []
label_list = []
ip1_loader = []
idx_loader = []
model.train()
for i,(imgs, labels) in enumerate(train_loader):
# Set batch data.
imgs, labels = imgs.to(device), labels.to(device)
# Predict labels.
ip1, pred = model(imgs)
# Calculate loss.
loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
# Initialize gradients.
dnn_optimizer.zero_grad()
center_optimizer.zero_grad()
# Calculate gradient.
loss.backward()
# Update parameters.
dnn_optimizer.step()
center_optimizer.step()
# Accumulate loss and predictions for the epoch metrics.
running_loss += loss.item()
pred_list += [int(p.argmax()) for p in pred]
label_list += [int(l) for l in labels]
# For visualization.
ip1_loader.append(ip1)
idx_loader.append((labels))
# Calculate training accurary and loss.
result = classification_report(pred_list, label_list, output_dict=True)
train_acc = round(result['weighted avg']['f1-score'], 6)
train_loss = round(running_loss / len(train_loader.dataset), 6)
# Concatenate features and labels.
feat = torch.cat(ip1_loader, 0)
labels = torch.cat(idx_loader, 0)
return train_acc, train_loss, feat, labels
def test(device, test_loader, model, nllloss, loss_weight, centerloss):
model = model.eval()
# Prediction
running_loss = 0.0
pred_list = []
label_list = []
for i,(imgs, labels) in enumerate(test_loader):
with torch.no_grad():
# Set batch data.
imgs, labels = imgs.to(device), labels.to(device)
# Predict labels.
ip1, pred = model(imgs)
# Calculate loss.
loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
# Append predictions and labels.
running_loss += loss.item()
pred_list += [int(p.argmax()) for p in pred]
label_list += [int(l) for l in labels]
# Calculate accuracy.
result = classification_report(pred_list, label_list, output_dict=True)
test_acc = round(result['weighted avg']['f1-score'], 6)
test_loss = round(running_loss / len(test_loader.dataset), 6)
return test_acc, test_loss
def visualize(feat, labels, epoch, vis_img_path):
colors = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
'#ff00ff', '#990000', '#999900', '#009900', '#009999']
plt.figure()
for i in range(10):
plt.plot(feat[labels==i, 0], feat[labels==i, 1], '.', color=colors[i])
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc='best')
plt.xlim(left=-8, right=8)
plt.ylim(bottom=-8, top=8)
plt.text(-7.8, 7.3, "epoch=%d" % epoch)
plt.savefig(vis_img_path)
plt.clf()
def parse_args():
arg_parser = argparse.ArgumentParser(description="parser for focus one")
arg_parser.add_argument("--dataset_dir", type=str, default='../inputs/')
arg_parser.add_argument("--model_dir", type=str, default='../outputs/models/checkpoints/')
arg_parser.add_argument("--model_path_temp", type=str, default='../outputs/models/checkpoints/mnist_original_softmax_center_epoch_{}.pth')
arg_parser.add_argument("--vis_img_dir", type=str, default='../outputs/visual/')
arg_parser.add_argument("--vis_img_path_temp", type=str, default='../outputs/visual/epoch_{}.png')
args = arg_parser.parse_args()
os.makedirs(args.dataset_dir, exist_ok=True)
os.makedirs(args.model_dir, exist_ok=True)
os.makedirs(args.vis_img_dir, exist_ok=True)
return args
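# Illustrative sketch (added for clarity, not part of the original script): how the
# joint objective used in train()/test() combines NLLLoss over log-probabilities with
# the center loss on 2-D features. The CenterLoss(10, 2) construction and the
# centerloss(labels, features) call order mirror main(); the dummy tensors below are
# assumptions made purely for illustration.
def _center_loss_toy_example():
    feats = torch.randn(8, 2)                         # dummy 2-D embeddings
    log_probs = F.log_softmax(torch.randn(8, 10), dim=1)
    labels = torch.randint(0, 10, (8,))
    nll = nn.NLLLoss()(log_probs, labels)
    center = CenterLoss(10, 2)(labels, feats)         # signature as used in main()/train()
    return nll + 1.0 * center                         # loss_weight = 1, as in main()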
if __name__ == "__main__":
main()
| mit |
reinaH/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
user_collection = database['user']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
dates_invited = [
user['date_registered']
for user in invited
]
if not dates_invited:
return
fig = plot_dates(dates_invited)
    plt.title('email invitations ({} total)'.format(len(dates_invited)))
plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
plt.close()
def analyze_email_confirmations():
confirmed = user_collection.find({
'unclaimed_records': {'$ne': {}},
'is_claimed': True,
})
dates_confirmed = [
user['date_confirmed']
for user in confirmed
]
if not dates_confirmed:
return
fig = plot_dates(dates_confirmed)
    plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
plt.close()
def main():
analyze_email_invites()
analyze_email_confirmations()
if __name__ == '__main__':
main()
| apache-2.0 |
roshchupkin/hase | tools/VCF2hdf5.py | 1 | 4024 |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import PYTHON_PATH
if PYTHON_PATH is not None:
for i in PYTHON_PATH: sys.path.insert(0,i)
import argparse
import h5py
import pandas as pd
import numpy as np
from hdgwas.tools import Timer
import tables
import glob
def probes_VCF2hdf5(data_path, save_path,study_name, chunk_size=1000000):
if os.path.isfile(os.path.join(save_path,'probes',study_name+'.h5')):
os.remove(os.path.join(save_path,'probes',study_name+'.h5'))
hash_table={'keys':np.array([],dtype=np.int),'allele':np.array([])}
df=pd.read_csv(data_path,sep='\t',chunksize=chunk_size, header=None,index_col=None)
for i,chunk in enumerate(df):
print 'add chunk {}'.format(i)
print chunk.head()
chunk.columns=[ "CHR","bp" ,"ID",'allele1','allele2','QUAL','FILTER','INFO'] #TODO (high) parse INFO
hash_1=chunk.allele1.apply(hash)
hash_2=chunk.allele2.apply(hash)
k,indices=np.unique(np.append(hash_1,hash_2),return_index=True)
s=np.append(chunk.allele1,chunk.allele2)[indices]
ind=np.invert(np.in1d(k,hash_table['keys']))
hash_table['keys']=np.append(hash_table['keys'],k[ind])
hash_table['allele']=np.append(hash_table['allele'],s[ind])
chunk.allele1=hash_1
chunk.allele2=hash_2
chunk.to_hdf(os.path.join(save_path,'probes',study_name+'.h5'),data_columns=["CHR","bp" ,"ID",'allele1','allele2'], key='probes',format='table',append=True,
min_itemsize = 25, complib='zlib',complevel=9 )
pd.DataFrame.from_dict(hash_table).to_csv(os.path.join(save_path,'probes',study_name+'_hash_table.csv.gz'),index=False,compression='gzip', sep='\t')
def ind_VCF2hdf5(data_path, save_path,study_name):
if os.path.isfile(os.path.join(save_path,'individuals',study_name+'.h5')):
os.remove(os.path.join(save_path,'individuals',study_name+'.h5'))
n=[]
f=open(data_path,'r')
for i,j in enumerate(f):
n.append((j[:-1]))
f.close()
n=np.array(n)
chunk=pd.DataFrame.from_dict({"individual":n})
chunk.to_hdf(os.path.join(save_path,'individuals',study_name+'.h5'), key='individuals',format='table',
min_itemsize = 25, complib='zlib',complevel=9 )
def genotype_VCF2hdf5(data_path,id, save_path, study_name):
df=pd.read_csv(data_path, header=None, index_col=None,sep='\t', dtype=np.float16)
data=df.as_matrix()
print data.shape
print 'Saving chunk...{}'.format(os.path.join(save_path,'genotype',str(id)+'_'+study_name+'.h5'))
h5_gen_file = tables.open_file(
os.path.join(save_path,'genotype',str(id)+'_'+study_name+'.h5'), 'w', title=study_name)
atom = tables.Float16Atom()
genotype = h5_gen_file.create_carray(h5_gen_file.root, 'genotype', atom,
(data.shape),
title='Genotype',
filters=tables.Filters(complevel=9, complib='zlib'))
genotype[:] = data
h5_gen_file.close()
os.remove(data_path)
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Script to convert VCF data')
parser.add_argument("-study_name", required=True, type=str, help="Study specific name")
parser.add_argument("-id", type=str, help="subject id")
parser.add_argument("-data",required=True, type=str, help="path to file")
parser.add_argument("-out",required=True, type=str, help="path to results save folder")
parser.add_argument("-flag",required=True,type=str,choices=['individuals','probes','chunk'], help="path to file with SNPs info")
args = parser.parse_args()
print args
try:
print ('Creating directories...')
os.mkdir(os.path.join(args.out,'genotype') )
os.mkdir(os.path.join(args.out,'individuals') )
os.mkdir(os.path.join(args.out,'probes') )
os.mkdir(os.path.join(args.out,'tmp_files'))
except:
print('Directories "genotype","probes","individuals" are already exist in {}...'.format(args.out))
if args.flag=='probes':
probes_VCF2hdf5(args.data, args.out, args.study_name)
elif args.flag=='individuals':
ind_VCF2hdf5(args.data, args.out,args.study_name)
elif args.flag=='chunk':
genotype_VCF2hdf5(args.data,args.id, args.out,args.study_name)
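# Hedged usage sketch (added for clarity; the file names are placeholders, not from the
# original repo). The flags mirror the argparse definitions above:
#   python VCF2hdf5.py -flag probes -data variants.txt -out ./converted -study_name STUDY
#   python VCF2hdf5.py -flag individuals -data samples.txt -out ./converted -study_name STUDY
#   python VCF2hdf5.py -flag chunk -id 0 -data chunk_0.txt -out ./converted -study_name STUDY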
| gpl-3.0 |
moutai/scikit-learn | sklearn/manifold/locally_linear.py | 37 | 25852 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
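# Illustrative sketch (added for clarity, not part of scikit-learn): reconstruct each
# sample from its 3 nearest neighbours; every row of the returned weight matrix sums
# to one. Uses only names defined or imported in this module.
def _barycenter_weights_example():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 3)
    ind = NearestNeighbors(n_neighbors=4).fit(X).kneighbors(X, return_distance=False)[:, 1:]
    return barycenter_weights(X, X[ind])  # shape (10, 3); rows sum to 1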
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
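# Illustrative sketch (added for clarity, not part of scikit-learn): the returned CSR
# matrix has one row per sample whose non-zero entries are that sample's barycenter
# weights, so each row sums to one.
#   rng = np.random.RandomState(0)
#   X = rng.rand(20, 3)
#   W = barycenter_kneighbors_graph(X, n_neighbors=5)
#   # W.shape == (20, 20) and np.allclose(W.sum(axis=1), 1.0)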
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # now M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
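# Illustrative sketch (added for clarity, not part of scikit-learn): embedding a small
# random point cloud down to two components with the function above.
def _locally_linear_embedding_example():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 5)
    Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
    return Y, err  # Y has shape (100, 2); err is the reconstruction error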
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
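# Illustrative usage sketch (added for clarity, not part of scikit-learn): the estimator
# wrapper above, including out-of-sample mapping of new points via transform().
def _locally_linear_embedding_estimator_example():
    rng = np.random.RandomState(0)
    X, X_new = rng.rand(100, 5), rng.rand(10, 5)
    lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2).fit(X)
    return lle.transform(X), lle.transform(X_new)  # both mapped to 2-D coordinates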
| bsd-3-clause |
moutai/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
google-research/disentanglement_lib | disentanglement_lib/data/ground_truth/cars3d.py | 1 | 4067 | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cars3D data set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from disentanglement_lib.data.ground_truth import ground_truth_data
from disentanglement_lib.data.ground_truth import util
import numpy as np
import PIL
import scipy.io as sio
from six.moves import range
from sklearn.utils import extmath
from tensorflow.compat.v1 import gfile
CARS3D_PATH = os.path.join(
os.environ.get("DISENTANGLEMENT_LIB_DATA", "."), "cars")
class Cars3D(ground_truth_data.GroundTruthData):
"""Cars3D data set.
The data set was first used in the paper "Deep Visual Analogy-Making"
(https://papers.nips.cc/paper/5845-deep-visual-analogy-making) and can be
downloaded from http://www.scottreed.info/. The images are rescaled to 64x64.
The ground-truth factors of variation are:
0 - elevation (4 different values)
1 - azimuth (24 different values)
2 - object type (183 different values)
"""
def __init__(self):
self.factor_sizes = [4, 24, 183]
features = extmath.cartesian(
[np.array(list(range(i))) for i in self.factor_sizes])
self.latent_factor_indices = [0, 1, 2]
self.num_total_factors = features.shape[1]
self.index = util.StateSpaceAtomIndex(self.factor_sizes, features)
self.state_space = util.SplitDiscreteStateSpace(self.factor_sizes,
self.latent_factor_indices)
self.data_shape = [64, 64, 3]
self.images = self._load_data()
@property
def num_factors(self):
return self.state_space.num_latent_factors
@property
def factors_num_values(self):
return self.factor_sizes
@property
def observation_shape(self):
return self.data_shape
def sample_factors(self, num, random_state):
"""Sample a batch of factors Y."""
return self.state_space.sample_latent_factors(num, random_state)
def sample_observations_from_factors(self, factors, random_state):
"""Sample a batch of observations X given a batch of factors Y."""
all_factors = self.state_space.sample_all_factors(factors, random_state)
indices = self.index.features_to_index(all_factors)
return self.images[indices].astype(np.float32)
def _load_data(self):
dataset = np.zeros((24 * 4 * 183, 64, 64, 3))
all_files = [x for x in gfile.ListDirectory(CARS3D_PATH) if ".mat" in x]
for i, filename in enumerate(all_files):
data_mesh = _load_mesh(filename)
factor1 = np.array(list(range(4)))
factor2 = np.array(list(range(24)))
all_factors = np.transpose([
np.tile(factor1, len(factor2)),
np.repeat(factor2, len(factor1)),
np.tile(i,
len(factor1) * len(factor2))
])
indexes = self.index.features_to_index(all_factors)
dataset[indexes] = data_mesh
return dataset
def _load_mesh(filename):
"""Parses a single source file and rescales contained images."""
with gfile.Open(os.path.join(CARS3D_PATH, filename), "rb") as f:
mesh = np.einsum("abcde->deabc", sio.loadmat(f)["im"])
flattened_mesh = mesh.reshape((-1,) + mesh.shape[2:])
rescaled_mesh = np.zeros((flattened_mesh.shape[0], 64, 64, 3))
for i in range(flattened_mesh.shape[0]):
pic = PIL.Image.fromarray(flattened_mesh[i, :, :, :])
pic.thumbnail((64, 64, 3), PIL.Image.ANTIALIAS)
rescaled_mesh[i, :, :, :] = np.array(pic)
return rescaled_mesh * 1. / 255
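# Hedged usage sketch (added for clarity, not part of disentanglement_lib); assumes the
# Cars3D .mat files are available under CARS3D_PATH:
#   data = Cars3D()
#   rs = np.random.RandomState(0)
#   factors = data.sample_factors(4, rs)                         # (4, 3) latent factor ids
#   images = data.sample_observations_from_factors(factors, rs)  # (4, 64, 64, 3), values in [0, 1]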
| apache-2.0 |
Lawrence-Liu/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
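# Typical invocation (sketch; an assumption, not stated in the original file): this
# configuration is normally consumed by the top-level scikit-learn build, e.g.
#   python setup.py build_ext --inplace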
| bsd-3-clause |
mohitreddy1996/Gender-Detection-from-Signature | src/train_test/random_forests.py | 1 | 1140 | from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler, normalize
df = pd.read_csv('../../Dataset/dataset.csv', delimiter='\t')
dataset = df.values
mask = np.random.rand(len(df)) < .80
train = df[mask]
test = df[~mask]
X = pd.DataFrame()
Y = pd.DataFrame()
X = train.ix[:, 2:len(train.columns) - 1]
Y = train.ix[:, len(train.columns) - 1: len(train.columns)]
X_Test = pd.DataFrame()
Y_Test = pd.DataFrame()
# After Normalising
X_standard = normalize(X)
print X_standard.shape
X_Test = test.ix[:, 2:len(test.columns) - 1]
Y_Test = test.ix[:, len(test.columns) - 1: len(test.columns)]
X_Test_standard = normalize(X_Test)
print X_Test_standard.shape
print "Training Data Set Size : ", str(len(X))
print "Testing Data Set Size : ", str(len(X_Test))
# tune parameters here.
rf = RandomForestClassifier(n_estimators=150, max_features=20)
rf.fit(X_standard, Y)
# predict
Y_Result = rf.predict(X_Test_standard)
print precision_recall_fscore_support(Y_Test, Y_Result, average='micro')
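# Possible follow-up sketch (added for clarity, not in the original script): inspect
# which of the normalised features drive the prediction.
#   print rf.feature_importances_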
| mit |
AlexanderFabisch/scikit-learn | sklearn/decomposition/tests/test_pca.py | 21 | 11810 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
    n_features = n_components + 2  # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
if hasattr(pca, 'random_state'):
pca.random_state = rng
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=4)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
eramirem/astroML | book_figures/chapter9/fig_photoz_tree.py | 3 | 3637 | """
Photometric Redshifts by Decision Trees
---------------------------------------
Figure 9.14
Photometric redshift estimation using decision-tree regression. The data is
described in Section 1.5.5. The training set consists of u, g , r, i, z
magnitudes of 60,000 galaxies from the SDSS spectroscopic sample.
Cross-validation is performed on an additional 6000 galaxies. The left panel
shows training error and cross-validation error as a function of the maximum
depth of the tree. For a number of nodes N > 13, overfitting is evident.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from astroML.datasets import fetch_sdss_specgals
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch data and prepare it for the computation
data = fetch_sdss_specgals()
# put magnitudes in a matrix
mag = np.vstack([data['modelMag_%s' % f] for f in 'ugriz']).T
z = data['z']
# train on ~60,000 points
mag_train = mag[::10]
z_train = z[::10]
# test on ~6,000 separate points
mag_test = mag[1::100]
z_test = z[1::100]
#------------------------------------------------------------
# Compute the cross-validation scores for several tree depths
depth = np.arange(1, 21)
rms_test = np.zeros(len(depth))
rms_train = np.zeros(len(depth))
i_best = 0
z_fit_best = None
for i, d in enumerate(depth):
clf = DecisionTreeRegressor(max_depth=d, random_state=0)
clf.fit(mag_train, z_train)
z_fit_train = clf.predict(mag_train)
z_fit = clf.predict(mag_test)
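    # Note: sqrt((a - b) ** 2) is just |a - b|, so despite the variable names
    # these are mean absolute errors rather than true root-mean-square errors.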
rms_train[i] = np.mean(np.sqrt((z_fit_train - z_train) ** 2))
rms_test[i] = np.mean(np.sqrt((z_fit - z_test) ** 2))
if rms_test[i] <= rms_test[i_best]:
i_best = i
z_fit_best = z_fit
best_depth = depth[i_best]
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(wspace=0.25,
left=0.1, right=0.95,
bottom=0.15, top=0.9)
# first panel: cross-validation
ax = fig.add_subplot(121)
ax.plot(depth, rms_test, '-k', label='cross-validation')
ax.plot(depth, rms_train, '--k', label='training set')
ax.set_xlabel('depth of tree')
ax.set_ylabel('rms error')
ax.yaxis.set_major_locator(plt.MultipleLocator(0.01))
ax.set_xlim(0, 21)
ax.set_ylim(0.009, 0.04)
ax.legend(loc=1)
# second panel: best-fit results
ax = fig.add_subplot(122)
ax.scatter(z_test, z_fit_best, s=1, lw=0, c='k')
ax.plot([-0.1, 0.4], [-0.1, 0.4], ':k')
ax.text(0.04, 0.96, "depth = %i\nrms = %.3f" % (best_depth, rms_test[i_best]),
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel(r'$z_{\rm true}$')
ax.set_ylabel(r'$z_{\rm fit}$')
ax.set_xlim(-0.02, 0.4001)
ax.set_ylim(-0.02, 0.4001)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.1))
plt.show()
| bsd-2-clause |
vitale232/ves | ves/VESinverse_vectorized.py | 1 | 12839 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 16:32:48 2016
@author: jclark
this code uses the Ghosh method to determine the apparent resistivities
for a layered earth model. Either schlumberger or Wenner configurations
can be used
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
import sys
# Schlumberger filter
fltr1 = [0., .00046256, -.0010907, .0017122, -.0020687,
.0043048, -.0021236, .015995, .017065, .098105, .21918, .64722,
1.1415, .47819, -3.515, 2.7743, -1.201, .4544, -.19427, .097364,
-.054099, .031729, -.019109, .011656, -.0071544, .0044042,
-.002715, .0016749, -.0010335, .00040124]
#Wenner Filter
fltr2 = [0., .000238935, .00011557, .00017034, .00024935,
.00036665, .00053753, .0007896, .0011584, .0017008, .0024959,
.003664, .0053773, .007893, .011583, .016998, .024934, .036558,
.053507, .078121, .11319, .16192, .22363, .28821, .30276, .15523,
-.32026, -.53557, .51787, -.196, .054394, -.015747, .0053941,
-.0021446, .000665125]
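# Both filter lists carry an unused leading 0. at index 0: fltr1 has 30 entries
# (29 coefficients, applied with k=29) and fltr2 has 35 entries (34 coefficients,
# applied with k=34).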
print(len(fltr1))
print(len(fltr2))
#I know there must be a better method to assign lists. And probably numpy
#arrays would be best. But my Python wasn't up to it. If the last letter
#is an 'l' that means it is a log10 of the value
# 65 is completely arbitrary
p = [0] * 20 # earth layer parameters?
r = [0] * 65 # apparent resistivity?
rl = [0] * 65 # np.log(r) ?
t = [0] * 50 #
b = [0] * 65 #
asav = [0] * 65 # voltage spacing in meters?
asavl = [0] * 65 # np.log(asav)
adatl = [0] * 65 # interpolated voltage spacing ( np.log(10) / 6 )?
rdatl = [0] * 65 # np.log()
# adat = [0] * 65 # voltage spacing input
# rdat = [0] * 65 # apparent res input
pkeep = [0] * 65 # earth parameters after applying equations?
rkeep = [0] * 65 # r after applying equations?
rkeepl = [0] * 65 # np.log()!
pltanswer = [0] * 65
pltanswerl = [0] * 65
pltanswerkeep = [0] * 65
pltanswerkeepl = [0] * 65
rl = [0] * 65
small = [0] * 65
xlarge = [0] * 65
x=[0] * 100
y = [0] * 100
y2 = [0] * 100
u = [0] * 5000
new_x = [0] * 1000
new_y = [0] * 1000
ndat = 13
#hard coded data input - spacing and apparent resistivities measured
#in the field
adat = [0., 0.55, 0.95, 1.5, 2.5, 3., 4.5, 5.5, 9., 12., 20., 30., 70.]
rdat = [0., 125., 110., 95., 40., 24., 15., 10.5, 8., 6., 6.5, 11., 25.]
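# adat: electrode spacings (m); rdat: measured apparent resistivities (ohm-m).
# Index 0 of each list is an unused placeholder (the code emulates 1-based
# indexing throughout).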
one30 = 1.e30 # What's the purpose of this and should it be user input?
rms = one30 # Just a starting value for rmserror?
errmin = 1.e10 # Should this be user input?
# INPUT
array_spacing = 'wenner' # either 'schlumberger' or 'wenner'
nLayers = 3 #number of layers
n = 2 * nLayers - 1 # number of model parameters: (nLayers - 1) thicknesses + nLayers resistivities
spac = 0.2 # smallest electrode spacing - should this come from the input file?
m = 20 # number of points where resistivity is calculated
spac = np.log(spac)
delx = np.log(10.0) / 6. # sample interval on the natural-log axis: six points per decade of spacing
# this is where the range in parameters should be input from a GUI
# I'm hard coding this in for now
#enter thickness range for each layer and then resistivity range.
#for 3 layers small[1] and small[2] are low end of thickness range
# small[3], small[4] and small[5] are the low end of resistivities
# I think I have it coded up that these are getting grabbed from the rectangles currently.
# Is that the best way to go?
small[1] = 1.
small[2] = 10.
small[3] = 20.
small[4] = 2.
small[5] = 500.
xlarge[1] = 5
xlarge[2] = 75.
xlarge[3] = 200.
xlarge[4] = 100
xlarge[5] = 3000.
iter_ = 10000 #number of iterations for the Monte Carlo guesses. to be input on GUI
# Is 10000 the most reasonable default, or should I play with it?
def readData(adat, rdat, ndat, return_indexed=False):
#normally this is where the data would be read from the csv file
# but now I'm just hard coding it in as global lists
for i in range(1, ndat):
adatl[i] = np.log10(adat[i])
rdatl[i] = np.log10(rdat[i])
if return_indexed:
return adatl[:ndat], rdatl[:ndat]
else:
return adatl, rdatl
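# transf() evaluates the layered-earth resistivity transform (kernel) at a
# single log-spaced abscissa y. Starting from the bottom half-space resistivity
# p[n], it applies the standard recurrence upward through the layers,
#   a = exp(-2*u*h_j),  b = (1 - a)/(1 + a)  (i.e. tanh(u*h_j)),
#   T_j = (b*rho_j + T_{j-1}) / (1 + b*rho_j*T_{j-1}/rho_j**2),
# and stores the surface value in r[i].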
def transf(y, i):
# these lines apparently find the computer precision ep
ep = 1.0
ep = ep / 2.0
fctr = ep + 1.
while fctr > 1.:
ep = ep / 2.0
fctr = ep + 1.
u = 1. / np.exp(y) # y = spac - 19. * delx - 0.13069
t[1] = p[n]
for j in range(2, nLayers + 1, 1):
pwr = -2. * u * p[nLayers + 1 - j]
if pwr < np.log(2. * ep):
pwr = np.log(2. * ep)
a = np.exp(pwr)
b = (1. - a) / (1. + a)
rs = p[n + 1 - j]
tpr = b * rs
t[j] = (tpr + t[j - 1]) / (1. + tpr * t[j - 1] / (rs * rs))
r[i] = t[nLayers]
return
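# filters() convolves the sampled resistivity transform held in r[] with a
# digital filter b (k coefficients, index 0 unused) to produce apparent
# resistivities at the m output spacings, overwriting r[1..m] in place
# (Ghosh's linear-filter method).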
def filters(b, k):
for i in range(1, m + 1):
re = 0.
for j in range(1, k + 1):
re = re + b[j] * r[i + k - j] # include ranges of thickness, res . push button for rmse error, observed data
                                    # surf thickness .2 - 100
# res 2-3000 # could use huge ranges at cost of time
r[i] = re
return
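# rmsfit() samples the resistivity transform on a log-spaced grid for the
# selected array type, applies the corresponding digital filter, converts the
# result to log10 apparent resistivity in asav[]/asavl[]/rl[], and returns the
# RMS misfit against the field data. Note that the 'wenner' branch applies
# fltr1 (labelled above as the Schlumberger filter) and the 'schlumberger'
# branch applies fltr2 (labelled as the Wenner filter); this labelling/usage
# mismatch is worth double-checking against the original filter coefficients.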
def rmsfit():
if array_spacing.lower() == 'wenner':
y = spac - 19. * delx - 0.13069
mum1 = m + 28
for i in range(1, mum1 + 1):
transf(y, i)
y = y + delx
filters(fltr1, 29)
elif array_spacing.lower() == 'schlumberger':
s = np.log(2.)
y = spac - 10.8792495 * delx
mum2 = m + 33
for i in range(1, mum2 + 1):
transf(y, i)
a = r[i]
y1 = y + s
transf(y1, i)
r[i] = 2. * a - r[i]
y = y + delx
filters(fltr2, 34)
else:
print("\nType of survey not indicated.")
raise SystemExit('Exiting.\n\n Take better care next time.')
x = spac
#print("A-Spacing App. Resistivity")
for i in range(1, m + 1):
a = np.exp(x)
asav[i] = a
asavl[i] = np.log10(a)
rl[i] = np.log10(r[i])
x = x + delx
#print("%7.2f %9.3f " % ( asav[i], r[i]))
rms = error()
return rms
def error(): # simple rms error calc
sumerror = 0.
#pltanswer = [0]*64
    spline(m, one30, one30, asavl, rl, y2)  # fills y2 in place with the spline
                                            # second derivatives of the predicted curve
    for i in range(1, ndat):  # index 0 of the data lists is an unused placeholder
        ans = splint(m, adatl[i], asavl, rl, y2)  # predicted log10 resistivity at the i-th spacing
sumerror = sumerror + (rdatl[i] - ans) * (rdatl[i] - ans)
#print(i,sum1,rdat[i],rdatl[i],ans)
pltanswerl[i] = ans
pltanswer[i] = np.power(10, ans)
rms = np.sqrt(sumerror / (ndat - 1))
# check the spline routine
# for i in range(1,m+1,1):
# anstest = splint(m, asavl[i],asavl,rl,y2)
# print( asavl[i], rl[i], anstest)
#print(' rms = ', rms)
    # if you really want to get a good idea of all predictions from the Monte
    # Carlo search, perform the following plot (caution - change iter_ to a smaller number)
#plt.loglog(adat[1:ndat],pltanswer[1:ndat])
return rms
# my code to do a spline fit to predicted data at the nice spacing of Ghosh
# use splint to determine the spline interpolated prediction at the
# spacing where the measured resistivity was taken - to compare observation
# to prediction
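# A minimal alternative sketch (assuming SciPy is available): a natural cubic
# spline from scipy.interpolate plays the same role as the spline()/splint()
# pair below. It is not wired into the program and is shown for reference only.
def scipy_spline_eval(x_knots, y_knots, x_query):
    """Evaluate a natural cubic spline through (x_knots, y_knots) at x_query."""
    from scipy.interpolate import CubicSpline  # requires SciPy
    cs = CubicSpline(x_knots, y_knots, bc_type='natural')
    return cs(x_query)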
def spline(n, yp1, ypn, x=[] ,y=[] ,y2=[]):
"""Still struggling to understand the general operation of this function."""
u = [0] * 1000
one29 = 0.99e30
#print(x,y)
if yp1 > one29:
y2[0] = 0.
u[0] = 0.
else:
y2[0] = -0.5
u[0] = (3. / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp1)
for i in range(1, n):
#print(i,x[i])
sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1])
p=sig * y2[i - 1] + 2.
y2[i] = (sig-1.) / p
        # parentheses added around (x[i] - x[i-1]); as written originally the
        # term divided by x[i] alone and then subtracted x[i-1], which is not
        # the cubic-spline formula
        u[i] = (((6. * ((y[i+1] - y[i]) / (x[i+1] - x[i]) - (y[i] - y[i-1]) /
                (x[i] - x[i-1]))) / (x[i + 1] - x[i - 1]) - sig * u[i - 1]) / p)
if ypn > one29:
qn = 0.
un = 0.
else:
qn = 0.5
un = (3. / (x[n] - x[n - 1])) * (ypn - (y[n] - y[n - 1]) / (x[n] - x[n - 1]))
y2[n] = (un - qn * u[n - 1]) / (qn * y2[n - 1] + 1.)
for k in range(n-1, -1, -1):
y2[k] = y2[k] * y2[k + 1] + u[k]
return
def splint(n, x, xa=[], ya=[], y2a=[]):
    """Evaluate the cubic spline defined by knots xa, values ya and second
    derivatives y2a (as produced by spline()) at the point x, locating the
    bracketing interval by bisection (cf. the Numerical Recipes 'splint'
    routine)."""
klo = 0
khi = n
while khi - klo > 1:
k = int((khi + klo) // 2)
if xa[k] > x:
khi = k
else:
klo = k
h = xa[khi] - xa[klo]
if abs(h) < 1e-20:
print(" bad xa input")
#print(x,xa[khi],xa[klo])
a = (xa[khi] - x) / h
b = (x - xa[klo]) / h
y = (a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2a[klo] +
(b * b * b - b) * y2a[khi]) * (h * h) /6.)
#print("x= ", x,"y= ", y, " ya= ", ya[khi]," y2a= ", y2a[khi], " h= ",h)
return y
#main here
if __name__ == '__main__':
adatl, rdatl = readData(adat, rdat, ndat, return_indexed=False)
print(adat[1:ndat],rdat[1:ndat])
    print('log10 of the input data:')
    print(adatl[1:ndat], rdatl[1:ndat])  # index 0 is an unused placeholder, so slices start at 1
    # Monte Carlo search: draw random parameter vectors uniformly within
    # [small, xlarge] and keep the model with the lowest RMS misfit.
for iloop in range(1, int(iter_/2) + 1):
#print( ' iloop is ', iloop)
for i in range(1, n + 1): # number of parameters + 1
            randNumber = random.random()  # uniform draw in [0, 1), scaled below to [small[i], xlarge[i])
# #print(randNumber, ' random')
# print(xlarge)
# print(small)
# s = input('')
# print('xlarge[i]: {}, small[i]: {}'.format(xlarge[i], small[i]))
p[i] = (xlarge[i] - small[i]) * randNumber + small[i]
# print(p)
print('\n')
print(p)
# s = input('')
rms = rmsfit()
if rms < errmin:
print('rms ', rms, ' errmin ', errmin)
for i in range(1, n + 1):
pkeep[i] = p[i]
for i in range(1, m + 1):
rkeep[i] = r[i]
rkeepl[i] = rl[i]
for i in range(1, ndat + 1):
pltanswerkeepl[i] = pltanswerl[i]
pltanswerkeep[i] = pltanswer[i]
errmin = rms
#output the best fitting earth model
print(' Layer ', ' Thickness ', ' Res_ohm-m ')
for i in range(1,nLayers,1):
print(i, pkeep[i], pkeep[nLayers+i-1])
print( nLayers, ' Infinite ', pkeep[n])
for i in range(1,m+1, 1):
asavl[i] = np.log10(asav[i])
#output the error of fit
print( ' RMS error ', errmin)
print( ' Spacing', ' Res_pred ', ' Log10_spacing ', ' Log10_Res_pred ')
for i in range(1,m+1,1):
#print(asav[i], rkeep[i], asavl[i], rkeepl[i])
print("%7.2f %9.3f %9.3f %9.3f" % ( asav[i], rkeep[i],
asavl[i], rkeepl[i]))
print('plot a lot')
plt.loglog(asav[1:m],rkeep[1:m],'-') # resistivity prediction curve
plt.loglog(adat[1:ndat],pltanswerkeep[1:ndat], 'ro') # predicted data red dots
s=7
plt.loglog(adat[1:ndat],rdat[1:ndat],'bo',markersize=s) #original data blue dots
    plt.grid(True)
    plt.show()
sys.exit(0)
| lgpl-3.0 |