repo_name | path | copies | size | content | license
---|---|---|---|---|---|
XueqingLin/tensorflow | tensorflow/examples/learn/text_classification_builtin_rnn_model.py | 7 | 3091 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def input_op_fn(x):
"""Customized function to transform batched x into embeddings."""
# Convert indexes of words into embeddings.
# This creates an embeddings matrix of shape [n_words, EMBEDDING_SIZE] and
# then maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into a list of embeddings per word, removing the doc length dim.
# word_list ends up as a list of tensors of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unpack(word_vectors, axis=1)
return word_list
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model: a single direction GRU with a single layer
classifier = learn.TensorFlowRNNClassifier(
rnn_size=EMBEDDING_SIZE, n_classes=15, cell_type='gru',
input_op_fn=input_op_fn, num_layers=1, bidirectional=False,
sequence_length=None, steps=1000, optimizer='Adam',
learning_rate=0.01, continue_training=True)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
FLAGS = parser.parse_args()
tf.app.run()
| apache-2.0 |
wijnandhoitinga/nutils | examples/platewithhole-nurbs.py | 1 | 5787 | #!/usr/bin/env python3
#
# In this script we solve the same infinite plane strain problem as in
# :ref:`examples/platewithhole.py`, but instead of using FCM to create the hole
# we use a NURBS-based mapping. A detailed description of the testcase can be
# found in Hughes et al., `Isogeometric analysis: CAD, finite elements, NURBS,
# exact geometry and mesh refinement`, Computer Methods in Applied Mechanics
# and Engineering, Elsevier, 2005, 194, 4135-4195.
from nutils import mesh, function, solver, export, cli, testing
import numpy, treelog
def main(nrefine:int, traction:float, radius:float, poisson:float):
'''
Horizontally loaded linear elastic plate with IGA hole.
.. arguments::
nrefine [2]
Number of uniform refinements starting from a 1x2 base mesh.
traction [.1]
Far field traction (relative to Young's modulus).
radius [.5]
Cut-out radius.
poisson [.3]
Poisson's ratio, nonnegative and strictly smaller than 1/2.
'''
# create the coarsest level parameter domain
domain, geom0 = mesh.rectilinear([1, 2])
bsplinebasis = domain.basis('spline', degree=2)
controlweights = numpy.ones(12)
controlweights[1:3] = .5 + .25 * numpy.sqrt(2)
weightfunc = bsplinebasis.dot(controlweights)
nurbsbasis = bsplinebasis * controlweights / weightfunc
# create geometry function
indices = [0,2], [1,2], [2,1], [2,0]
controlpoints = numpy.concatenate([
numpy.take([0, 2**.5-1, 1], indices) * radius,
numpy.take([0, .3, 1], indices) * (radius+1) / 2,
numpy.take([0, 1, 1], indices)])
geom = (nurbsbasis[:,numpy.newaxis] * controlpoints).sum(0)
radiuserr = domain.boundary['left'].integral((function.norm2(geom) - radius)**2 * function.J(geom0), degree=9).eval()**.5
treelog.info('hole radius exact up to L2 error {:.2e}'.format(radiuserr))
# refine domain
if nrefine:
domain = domain.refine(nrefine)
bsplinebasis = domain.basis('spline', degree=2)
controlweights = domain.project(weightfunc, onto=bsplinebasis, geometry=geom0, ischeme='gauss9')
nurbsbasis = bsplinebasis * controlweights / weightfunc
ns = function.Namespace()
ns.x = geom
ns.lmbda = 2 * poisson
ns.mu = 1 - poisson
ns.ubasis = nurbsbasis.vector(2)
ns.u_i = 'ubasis_ni ?lhs_n'
ns.X_i = 'x_i + u_i'
ns.strain_ij = '(u_i,j + u_j,i) / 2'
ns.stress_ij = 'lmbda strain_kk δ_ij + 2 mu strain_ij'
ns.r2 = 'x_k x_k'
ns.R2 = radius**2 / ns.r2
ns.k = (3-poisson) / (1+poisson) # plane stress parameter
ns.scale = traction * (1+poisson) / 2
ns.uexact_i = 'scale (x_i ((k + 1) (0.5 + R2) + (1 - R2) R2 (x_0^2 - 3 x_1^2) / r2) - 2 δ_i1 x_1 (1 + (k - 1 + R2) R2))'
ns.du_i = 'u_i - uexact_i'
sqr = domain.boundary['top,bottom'].integral('(u_i n_i)^2 d:x' @ ns, degree=9)
cons = solver.optimize('lhs', sqr, droptol=1e-15)
sqr = domain.boundary['right'].integral('du_k du_k d:x' @ ns, degree=20)
cons = solver.optimize('lhs', sqr, droptol=1e-15, constrain=cons)
# construct residual
res = domain.integral('ubasis_ni,j stress_ij d:x' @ ns, degree=9)
# solve system
lhs = solver.solve_linear('lhs', res, constrain=cons)
# visualize result
bezier = domain.sample('bezier', 9)
X, stressxx = bezier.eval(['X_i', 'stress_00'] @ ns, lhs=lhs)
export.triplot('stressxx.png', X, stressxx, tri=bezier.tri, hull=bezier.hull, clim=(numpy.nanmin(stressxx), numpy.nanmax(stressxx)))
# evaluate error
err = domain.integral('<du_k du_k, du_i,j du_i,j>_n d:x' @ ns, degree=9).eval(lhs=lhs)**.5
treelog.user('errors: L2={:.2e}, H1={:.2e}'.format(*err))
return err, cons, lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to keep with the default arguments simply run :sh:`python3
# platewithhole-nurbs.py`.
if __name__ == '__main__':
cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(testing.TestCase):
@testing.requires('matplotlib')
def test0(self):
err, cons, lhs = main(nrefine=0, traction=.1, radius=.5, poisson=.3)
with self.subTest('l2-error'):
self.assertAlmostEqual(err[0], .00199, places=5)
with self.subTest('h1-error'):
self.assertAlmostEqual(err[1], .02269, places=5)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNpjYGBoQIIggMZXOKdmnHRe3vjh+cvGDAwA6w0LgQ==''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNpjYJh07qLhhnOTjb0vTDdmAAKVcy/1u85lGYforQDzFc6pGSedlzd+eP4ykA8AvkQRaA==''')
@testing.requires('matplotlib')
def test2(self):
err, cons, lhs = main(nrefine=2, traction=.1, radius=.5, poisson=.3)
with self.subTest('l2-error'):
self.assertAlmostEqual(err[0], .00009, places=5)
with self.subTest('h1-error'):
self.assertAlmostEqual(err[1], .00286, places=5)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNpjYGBoIAKCwCBXp3kuysDjnLXR+3NPjTzPqxrnAnHeeQvjk+dTjZ9d2GG85soJYwYGAPkhPtE=''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNpjYOg890mv85yM4axz0kYHz+00Yj6vZJxzPtWY+0KPMffFucaml+caMwBB5LlCvYhzCw0qzu0wPHyu
0sjlPIsx14VoY/6LvcaxlxYZz7myCKzO+dwWPZdzBwzqz20z/Hguxmj2+TtGHRdsjHdfbDB2v7zUeMXV
pWB1VucC9B3OORmuOCdhZHR+ktGu87eNbC6oGstfLDA+eWm1seG19WB1Buf+6ruce2p469wco9Dzb4wm
n2c23nZe3djqQqpx88XNxrOv7gOr0zwXZeBxztro/bmnRp7nVY1zgTjvvIXxSaBfnl3YYbzmygmgOgDU
Imlr''')
| mit |
evgchz/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
===========================================
Segmenting the picture of Lena into regions
===========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans', spectral clustering will cluster samples in the embedding
space using a k-means algorithm
* whereas 'discretize' will iteratively search for the partition space
closest to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
jrabenoit/fizzy | estimators.py | 1 | 8082 | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import copy, pickle
from sklearn import svm, naive_bayes, neighbors, ensemble, linear_model, tree, neural_network
#Quick note: feature_importances_ can be used with random forest etc. to generate feature importance lists
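#Illustrative sketch of that note (not part of the original pipeline): after
#fitting, tree ensembles expose feature_importances_, one score per column.
#The feature_names argument below is hypothetical.
def FeatureImportances(X_train, y_train, feature_names):
    rf = ensemble.RandomForestClassifier(n_estimators=201, criterion='entropy')
    rf.fit(X_train, y_train)
    ranked = sorted(zip(feature_names, rf.feature_importances_),
                    key=lambda pair: pair[1], reverse=True)
    return pd.DataFrame(ranked, columns=['feature', 'importance'])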
def InnerFolds():
with open('/media/james/ext4data1/current/projects/pfizer/icvfeats.pickle','rb') as f: icv=pickle.load(f)
patients= pd.read_csv('/media/james/ext4data1/current/projects/pfizer/labels-final.csv', encoding='utf-8').set_index('PATIENT')
folds= len(icv['X_train'])
rf= ensemble.RandomForestClassifier(n_estimators=201, criterion='entropy', max_depth=2)
et= ensemble.ExtraTreesClassifier(n_estimators=167, max_depth=2)
kn= neighbors.KNeighborsClassifier(n_neighbors=35, weights='distance')
nb= naive_bayes.GaussianNB()
nn= neural_network.MLPClassifier(hidden_layer_sizes=(122,),solver='sgd', max_iter=2000)
dt= tree.DecisionTreeClassifier(max_depth=2, splitter='random')
ls= svm.LinearSVC(penalty='l1', dual=False, C=0.075)
gb= ensemble.GradientBoostingClassifier()
ab= ensemble.AdaBoostClassifier(base_estimator= rf, learning_rate=0.9)
vc= ensemble.VotingClassifier(estimators=[('rf', rf),('kn', kn),('et',et)])
bc= ensemble.BaggingClassifier(base_estimator=rf, n_estimators=10)
est= {'randomforest': rf,
'extratrees': et,
'kneighbors': kn,
'naivebayes': nb,
'decisiontree': dt,
'linearsvc': ls,
'adaboost': ab,
'neuralnet': nn,
'voting': vc,
'hobbitses': bc,
'gboost': gb
}
train_results= {'fold':[], 'estimator':[], 'subjects':[],
'labels':[], 'predictions':[], 'scores':[],
'attempts':[]
}
test_results= {'fold':[], 'estimator':[], 'subjects':[],
'labels':[], 'predictions':[], 'scores':[],
'attempts':[]
}
for i in range(folds):
print(i+1)
X_train= icv['X_train'][i]
X_test= icv['X_test'][i]
y_train= icv['y_train'][i]
y_test= icv['y_test'][i]
train_ids= patients.index[icv['train_indices'][i]]
test_ids= patients.index[icv['test_indices'][i]]
for j,k in zip(est.keys(), est.values()):
k.fit(X_train, y_train)
predict_train= k.predict(X_train)
train_scores= [1 if x==y else 0 for x,y in zip(y_train, predict_train)]
train_results['fold'].extend([i+1]*len(X_train))
train_results['estimator'].extend([j]*len(X_train))
train_results['subjects'].extend(train_ids)
train_results['labels'].extend(y_train)
train_results['predictions'].extend(predict_train)
train_results['scores'].extend(train_scores)
train_results['attempts'].extend([1]*len(X_train))
predict_test= k.predict(X_test)
test_scores= [1 if x==y else 0 for x,y in zip(y_test, predict_test)]
test_results['fold'].extend([i+1]*len(X_test))
test_results['estimator'].extend([j]*len(X_test))
test_results['subjects'].extend(test_ids)
test_results['labels'].extend(y_test)
test_results['predictions'].extend(predict_test)
test_results['scores'].extend(test_scores)
test_results['attempts'].extend([1]*len(X_test))
train_df=pd.DataFrame.from_dict(train_results).set_index('subjects')
test_df=pd.DataFrame.from_dict(test_results).set_index('subjects')
train_df.to_csv(path_or_buf='/media/james/ext4data1/current/projects/pfizer/inner_train_results.csv')
test_df.to_csv(path_or_buf='/media/james/ext4data1/current/projects/pfizer/inner_test_results.csv')
trd= train_df.groupby('estimator').sum()
trsum= (trd['scores']/trd['attempts'])*100
print(trsum)
pmax= trsum.idxmax()
print('\nBest train: {}\n'.format(pmax))
ted= test_df.groupby('estimator').sum()
tesum= (ted['scores']/ted['attempts'])*100
print(tesum)
pmax= tesum.idxmax()
print('\nBest test: {}\n'.format(pmax))
return
def OuterFolds():
with open('/media/james/ext4data1/current/projects/pfizer/ocvfeats.pickle','rb') as f: ocv=pickle.load(f)
patients= pd.read_csv('/media/james/ext4data1/current/projects/pfizer/labels-final.csv', encoding='utf-8').set_index('PATIENT')
folds= len(ocv['X_train'])
rf= ensemble.RandomForestClassifier()
et= ensemble.ExtraTreesClassifier()
kn= neighbors.KNeighborsClassifier()
nb= naive_bayes.GaussianNB()
nn= neural_network.MLPClassifier()
dt= tree.DecisionTreeClassifier()
ls= svm.LinearSVC()
gb= ensemble.GradientBoostingClassifier()
ab= ensemble.AdaBoostClassifier()
vc= ensemble.VotingClassifier(estimators=[('rf', rf),('kn', kn),('et',et)])
bc= ensemble.BaggingClassifier(base_estimator=rf, n_estimators=100)
est= {#'randomforest': rf,
#'extratrees': et,
#'kneighbors': kn,
#'naivebayes': nb,
#'decisiontree': dt
'linearsvm': ls,
#'adaboost': ab
#'neuralnet': nn,
#'voting': vc
#'hobbitses': bc
#'gboost': gb
}
train_results= {'fold':[], 'estimator':[], 'subjects':[],
'labels':[], 'predictions':[], 'scores':[],
'attempts':[]
}
test_results= {'fold':[], 'estimator':[], 'subjects':[],
'labels':[], 'predictions':[], 'scores':[],
'attempts':[]
}
for i in range(folds):
X_train= ocv['X_train'][i]
X_test= ocv['X_test'][i]
y_train= ocv['y_train'][i]
y_test= ocv['y_test'][i]
train_ids= patients.index[ocv['train_indices'][i]]
test_ids= patients.index[ocv['test_indices'][i]]
for j,k in zip(est.keys(), est.values()):
k.fit(X_train, y_train)
predict_train= k.predict(X_train)
train_scores= [1 if x==y else 0 for x,y in zip(y_train, predict_train)]
train_results['fold'].extend([i+1]*len(X_train))
train_results['estimator'].extend([j]*len(X_train))
train_results['subjects'].extend(train_ids)
train_results['labels'].extend(y_train)
train_results['predictions'].extend(predict_train)
train_results['scores'].extend(train_scores)
train_results['attempts'].extend([1]*len(X_train))
predict_test= k.predict(X_test)
test_scores= [1 if x==y else 0 for x,y in zip(y_test, predict_test)]
test_results['fold'].extend([i+1]*len(X_test))
test_results['estimator'].extend([j]*len(X_test))
test_results['subjects'].extend(test_ids)
test_results['labels'].extend(y_test)
test_results['predictions'].extend(predict_test)
test_results['scores'].extend(test_scores)
test_results['attempts'].extend([1]*len(X_test))
train_df=pd.DataFrame.from_dict(train_results).set_index('subjects')
test_df=pd.DataFrame.from_dict(test_results).set_index('subjects')
train_df.to_csv(path_or_buf='/media/james/ext4data1/current/projects/pfizer/outer_train_results.csv')
test_df.to_csv(path_or_buf='/media/james/ext4data1/current/projects/pfizer/outer_test_results.csv')
trd= train_df.groupby('estimator').sum()
trsum= (trd['scores']/trd['attempts'])*100
print(trsum)
pmax= trsum.idxmax()
print('\nBest train: {}\n'.format(pmax))
ted= test_df.groupby('estimator').sum()
tesum= (ted['scores']/ted['attempts'])*100
print(tesum)
pmax= tesum.idxmax()
print('\nBest test: {}\n'.format(pmax))
return
| gpl-3.0 |
chuckwondo/ThinkStats2 | code/thinkstats2.py | 68 | 68825 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
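# Usage sketch (illustrative, not part of the original module): linear
# interpolation in both directions between two sorted sequences.
def _demo_interpolator():
    interp = Interpolator([1, 2, 3], [10, 20, 30])
    return interp.Lookup(2.5), interp.Reverse(15)   # (25.0, 1.5)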
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
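# Usage sketch (illustrative, not part of the original module): Pmf.__add__
# convolves two distributions, e.g. the total of two fair six-sided dice.
def _demo_add_pmf():
    d6 = Pmf(range(1, 7), label='d6')
    total = d6 + d6
    return total.Prob(7)   # 6/36 for two fair dice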
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
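# Usage sketch (illustrative, not part of the original module): the joint
# distribution of two independent fair coins.
def _demo_make_joint():
    coin = Pmf({'H': 0.5, 'T': 0.5})
    joint = MakeJoint(coin, coin)
    return joint[('H', 'H')]   # 0.25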
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
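# Usage sketch (illustrative, not part of the original module): mix two
# biased coins, weighting the first 0.7 and the second 0.3.
def _demo_make_mixture():
    metapmf = Pmf()
    metapmf.Set(Pmf({'H': 0.5, 'T': 0.5}), 0.7)
    metapmf.Set(Pmf({'H': 0.9, 'T': 0.1}), 0.3)
    mix = MakeMixture(metapmf)
    return mix['H']   # 0.7*0.5 + 0.3*0.9 = 0.62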
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
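# Usage sketch (illustrative, not part of the original module): percentiles
# and percentile ranks from a small sample.
def _demo_cdf():
    cdf = Cdf([1, 2, 2, 3, 5])
    return cdf.Percentile(50), cdf.PercentileRank(3)   # (2, 80.0)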
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
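# Usage sketch (illustrative, not part of the original module): a Suite
# subclass only has to supply Likelihood; here each hypothesis is the number
# of sides on a die and the data is one observed roll.
class _DemoDice(Suite):
    def Likelihood(self, data, hypo):
        return 0 if data > hypo else 1.0 / hypo
# e.g. _DemoDice([4, 6, 8, 12, 20]).Update(6) reweights the five hypotheses.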
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
in that case the density is evaluated at n locations between
low and high, including both.
If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
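# Illustrative sketch (not part of the original module): a beta-binomial update
# with the Beta class above. The heads/tails counts are invented for the example.
def _demo_beta_update():
    """Updates a uniform Beta prior with hypothetical coin data; returns the posterior mean."""
    beta = Beta(alpha=1, beta=1)
    beta.Update((140, 110))
    return beta.Mean()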
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
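# Illustrative sketch (not part of the original module): a Dirichlet update over
# three hypothetical categories, its marginal Beta and a predictive Pmf. The
# observation counts and category labels are made up for the example.
def _demo_dirichlet_update():
    """Returns (marginal mean of category 0, predictive Pmf) after one update."""
    dirichlet = Dirichlet(3)
    dirichlet.Update((3, 2, 1))
    marginal = dirichlet.MarginalBeta(0)
    predictive = dirichlet.PredictivePmf(['a', 'b', 'c'])
    return marginal.Mean(), predictive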
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
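# Illustrative sketch (not part of the original module): Cohen's d on two
# synthetic normal samples whose means differ by one standard deviation, so
# the returned effect size should be close to 1.
def _demo_cohen_effect_size():
    """Returns Cohen's d for two synthetic samples."""
    group1 = np.random.normal(0.0, 1.0, 1000)
    group2 = np.random.normal(1.0, 1.0, 1000)
    return CohenEffectSize(group2, group1)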
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
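# Illustrative sketch (not part of the original module): Pearson vs. Spearman
# correlation on a monotonic but non-linear synthetic relationship; Spearman's
# rank correlation should come out closer to 1 than Pearson's.
def _demo_correlations():
    """Returns (Pearson, Spearman) correlations for noisy y = exp(x)."""
    xs = np.random.uniform(0, 5, 1000)
    ys = np.exp(xs) + np.random.normal(0, 1, 1000)
    return Corr(xs, ys), SpearmanCorr(xs, ys)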
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
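# Illustrative sketch (not part of the original module): the least-squares
# helpers above applied to synthetic data with known intercept 1 and slope 2.
def _demo_least_squares():
    """Fits a line to noisy synthetic data; returns (inter, slope, R^2)."""
    xs = np.linspace(0, 10, 200)
    ys = 1.0 + 2.0 * xs + np.random.normal(0, 1, 200)
    inter, slope = LeastSquares(xs, ys)
    fit_xs, fit_ys = FitLine(xs, inter, slope)  # sorted xs / fitted ys, e.g. for plotting
    res = Residuals(xs, ys, inter, slope)
    return inter, slope, CoefDetermination(ys, res)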
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
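# Illustrative sketch (not part of the original module): the usual pairing of
# ReadStataDct with FixedWidthVariables.ReadFixedWidth. The file names are
# placeholders; any Stata dictionary/data pair laid out this way would do.
def _demo_read_stata(dct_name='example.dct', dat_name='example.dat.gz'):
    """Reads a fixed-width data file described by a Stata dictionary."""
    dct = ReadStataDct(dct_name)
    return dct.ReadFixedWidth(dat_name, compression='gzip', nrows=100)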
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
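# Illustrative sketch (not part of the original module): building a 90% band
# with PercentileRows from simulated lines; the random walks are invented data.
def _demo_percentile_rows():
    """Returns the 5th and 95th percentile lines of 100 simulated random walks."""
    ys_seq = [np.cumsum(np.random.normal(0, 1, 50)) for _ in range(100)]
    low, high = PercentileRows(ys_seq, [5, 95])
    return low, high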
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
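# Illustrative sketch (not part of the original module): a minimal concrete
# subclass of HypothesisTest implementing a permutation test for a difference
# in means. The data layout (a pair of numpy arrays) is an assumption made
# for the example.
class _DemoDiffMeansPermute(HypothesisTest):
    """Permutation test for the difference in means of two groups."""
    def TestStatistic(self, data):
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]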
def main():
pass
if __name__ == '__main__':
main()
| gpl-3.0 |
shyamalschandra/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
dipapaspyros/bdo_platform | visualizer/utils.py | 1 | 38302 | import json
from django.contrib.auth.models import User
from django.http import HttpResponse
from PIL import Image, ImageChops
from query_designer.models import Query, AbstractQuery
import numpy as np
from math import floor, ceil
import folium.plugins as plugins
import folium
import numpy as np
import requests
import collections
from django.db import connections
from django.conf import settings
from service_builder.models import ServiceInstance
def convert_unicode_json(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert_unicode_json, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert_unicode_json, data))
else:
return data
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
def fig2img(fig):
"""
@brief Convert a Matplotlib figure to a PIL Image in RGBA format and return it
@param fig a matplotlib figure
@return a Python Imaging Library ( PIL ) image
"""
    # put the figure pixmap into a numpy array
    buf = fig2data(fig)
    w, h, d = buf.shape
    return Image.fromstring("RGBA", (w, h), buf.tostring())
def get_test_data(query, user):
if str(query).isdigit():
q = Query.objects.get(pk=int(query))
result_json = q.execute()
return result_json['headers'], result_json['results']
else:
print 'het'
print 'query: ' + query
q = Query(user=User.objects.get(username='BigDataOcean'), document=json.loads(str(query).replace('%20', ' ')))
# q = Query(user=User.objects.get(username='BigDataOcean'))
# doc = json.loads(request.POST.get('document', ''))
print q
result_json = q.execute()
return result_json['headers'], result_json['results']
def filter_data(d, other_dims, other_dims_first_vals):
for dim, val in zip(other_dims, other_dims_first_vals):
if str(d[dim]) != val:
return 1
return 0
def trim(img):
border = Image.new(img.mode, img.size, img.getpixel((0, 0)))
diff = ImageChops.difference(img, border)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
img = img.crop(bbox)
return np.array(img)
def create_grid_data(lats, lons, data):
grid = list()
grid_count = list()
for i in range(0, len(lons)):
grid_row = list()
grid_count_row = list()
for j in range(0, len(lats)):
grid_row.append(None)
grid_count_row.append(0)
grid.append(grid_row)
grid_count.append(grid_count_row)
for item in data:
j = 0
for l in lats:
if item[0] < l:
j += 1
i = 0
for l in lons:
if item[1] < l:
i += 1
if grid[i][j] is None:
grid[i][j] = item[2]
grid_count[i][j] = 1
else:
grid[i][j] += item[2]
grid_count[i][j] += 1
for i in range(0, len(lons)):
for j in range(0, len(lats)):
if grid_count[i][j] > 0:
grid[i][j] /= grid_count[i][j]
# print grid
# print grid_count
return grid
# TODO: replace 100 and make it configurable
def create_bins(list, step):
return np.arange(floor(list.min() * 100) / 100, ceil(list.max() * 100) / 100 + 0.00001, step)
def create_folium_map(location=[0,0], zoom_start=2, max_zoom=13):
tiles_str = 'https://api.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.png?access_token='
token_str = 'pk.eyJ1IjoiZ3RzYXBlbGFzIiwiYSI6ImNqOWgwdGR4NTBrMmwycXMydG4wNmJ5cmMifQ.laN_ZaDUkn3ktC7VD0FUqQ'
attr_str = 'Map data ©<a href="http://openstreetmap.org">OpenStreetMap</a>contributors, ' \
'<a href="http://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, ' \
'Imagery \u00A9 <a href="http://mapbox.com">Mapbox</a>'
m = folium.Map(location=location,
zoom_start=zoom_start,
max_zoom=max_zoom,
tiles=tiles_str + token_str,
attr=attr_str,
control_scale=True)
# folium.TileLayer('openstreetmap').add_to(m)
plugins.Fullscreen(
position='topright',
title='Expand',
title_cancel='Exit',
force_separate_button=True).add_to(m)
return m
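# Illustrative sketch (not part of the original module): composing the helpers
# above -- bin a few (lat, lon, value) points, grid-average them and create a
# folium map to draw on. The sample points and bin step are invented.
def _demo_grid_on_map():
    """Grids a few (lat, lon, value) points and returns (grid, folium map)."""
    data = [(35.0, 18.0, 14.2), (35.5, 18.5, 15.1), (36.0, 19.0, 13.7)]
    lats = create_bins(np.array([d[0] for d in data]), 0.5)
    lons = create_bins(np.array([d[1] for d in data]), 0.5)
    grid = create_grid_data(lats, lons, data)
    m = create_folium_map(location=[35.5, 18.5], zoom_start=5)
    return grid, m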
def create_zep_note(name):
data = dict()
data['name'] = name
str_data = json.dumps(data)
# Make a post request
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook", data=str_data)
print response
response_json = response.json()
notebook_id = response_json['body']
print notebook_id
return notebook_id
def clone_zep_note(notebook_id, name):
data = dict()
data['name'] = name
str_data = json.dumps(data)
# Make a post request
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/"+notebook_id, data=str_data)
print response
response_json = response.json()
notebook_id = response_json['body']
print notebook_id
return notebook_id
def run_zep_note(notebook_id, exclude=[], mode='zeppelin'):
if mode == 'livy':
session_id = create_livy_session(notebook_id)
response_status = 500
# number of tries
counter = 1
paragraphs = []
response = requests.get(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id))
response_json = response.json()
for p in response_json['body']['paragraphs']:
if str(p['id']) not in exclude:
paragraphs.append(p['id'])
else:
print 'excluded paragraph: {0}'.format(str(p['id']))
if mode == 'livy':
for p in paragraphs:
run_result = run_zep_paragraph(notebook_id, p, session_id, mode)
return session_id
else:
for p in paragraphs:
run_result = run_zep_paragraph(notebook_id, p, 0, mode, 1)
if run_result == 1:
paragraphs = []
response = requests.get(settings.ZEPPELIN_URL + "/api/notebook/" + str(notebook_id))
response_json = response.json()
for p in response_json['body']['paragraphs']:
if str(p['id']) not in exclude:
paragraphs.append(p['id'])
else:
print 'excluded paragraph: {0}'.format(str(p['id']))
for p in paragraphs:
run_result = run_zep_paragraph(notebook_id, p, 0, mode, 2)
break
return 0
def create_zep_test_query_paragraph(notebook_id, title, raw_query):
data = dict()
data['title'] = title
data['text'] = '%spark.pyspark' \
'\ndf = spark.read.format("jdbc")' \
'.option("url", "jdbc:postgresql://localhost:5432/bdo_platform?user=postgres&password=1234")' \
'.option("driver", "org.postgresql.Driver")' \
'.option("database", "bdo_platform")' \
'.option("dbtable", "(SELECT * FROM (SELECT temperature.value AS i0_value,temperature.time_4 AS i0_time,temperature.depth_5 AS i0_depth,temperature.lat_6 AS i0_location_latitude,temperature.lon_7 AS i0_location_longitude FROM votemper_2 AS temperature LIMIT 10 ) AS SQ1 ) AS SPARKQ0")' \
'.load()' \
'\ndf.printSchema()'
str_data = json.dumps(data)
response = requests.post("http://localhost:8080/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_arguments_paragraph(notebook_id, title, args_json_string):
data = dict()
data['title'] = title
data['index'] = 1
data['text'] = '%spark.pyspark' \
'\nimport json' \
'\narguments = dict()' \
'\nresult = dict()' \
'\narguments = json.loads(\'{0}\')' \
'\nprint arguments'.format(args_json_string)
print args_json_string
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep__query_paragraph(notebook_id, title, raw_query, index=-1, df_name="df"):
data = dict()
if index >= 0:
data['index'] = index
data['title'] = title
conn_dict = connections[settings.ZEPPELIN_DB].settings_dict
data['text'] = '%spark.pyspark' \
'\n'+df_name+'= load_df("(' + str(raw_query).replace("\n", " ") + ') AS SPARKQ0")' \
'\n'+df_name+'.printSchema()'
data['editorHide'] = True
# data['text'] = '%spark.pyspark' \
# '\n' + df_name + '= spark.read.format("jdbc")' \
# '.option("url", "jdbc:postgresql://localhost:5432/bdo_platform?user=postgres&password=1234")' \
# '.option("driver", "org.postgresql.Driver")' \
# '.option("database", "bdo_platform")' \
# '.option("dbtable", "(' + str(raw_query).replace("\n", " ") + ') AS SPARKQ0")' \
# '.load()' \
# '\n' + df_name + '.printSchema()'
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def delete_zep_paragraph(notebook_id, paragraph_id):
data = dict()
str_data = json.dumps(data)
response = requests.delete(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id), data=str_data)
print response
def delete_zep_notebook(notebook_id):
data = dict()
str_data = json.dumps(data)
response = requests.delete(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id), data=str_data)
print response
def execute_code_on_livy(code, session_id, kind):
print 'executing code on livy'
host = settings.LIVY_URL
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
data = dict()
data['code'] = code
data['kind'] = kind
statements_url = host + '/sessions/{0}/statements'.format(session_id)
response = requests.post(statements_url, data=json.dumps(data), headers=headers).json()
print response
try:
statement_id = response['id']
state = ''
from time import sleep
sleep(3) # Time in seconds.
while state != 'available':
state = requests.get(host + '/sessions/' + str(session_id) + '/statements/' + str(statement_id), headers=headers).json()['state']
if state == 'error' or state == 'cancelling' or state == 'cancelled':
raise Exception('Failed')
except Exception:
raise Exception('Failed')
return statement_id
def run_zep_paragraph(notebook_id, paragraph_id, livy_session_id, mode, attempt=2):
if mode == 'livy':
data = dict()
str_data = json.dumps(data)
response = requests.get(settings.ZEPPELIN_URL + "/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id), data=str_data)
print response
response_json = convert_unicode_json(response.json())
code = str(response_json['body']['text']).strip().replace("u'{", "{")
kind = 'spark'
if 'pyspark' in code:
kind = 'pyspark'
code = code.replace('%spark.pyspark\n', '').replace('%spark.pyspark', '').replace('%pyspark\n', '').replace('%pyspark', '').replace('%spark\n', '').replace('%spark', '')
if code is not None or code != '':
execute_code_on_livy(code=code, session_id=livy_session_id, kind=kind)
return 0 #end
else:
response_status = 500
counter = 3
while response_status != 200:
data = dict()
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/run/" + str(notebook_id) + "/" + str(paragraph_id), data=str_data)
print response
counter -= 1
response_status = response.status_code
if response_status != 200 and counter == 1:
if attempt == 1:
import time
time.sleep(7)
restart_zep_interpreter(settings.ZEPPELIN_SPARK_INTERPRETER)
return 1
else:
# return 2
return HttpResponse(status=500)
if counter == 0:
# return 2
return HttpResponse(status=500)
def create_zep_viz_paragraph(notebook_id, title):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%sql' \
'\nselect * from tempTablePostgres'
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_sort_paragraph(notebook_id, title, sort_col):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark.pyspark' \
'\ndf = df.sort("' + sort_col + '")'
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_reg_table_paragraph(notebook_id, title):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark.pyspark' \
'\ndf.registerTempTable("tempTablePostgres")'
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def set_zep_paragraph_line_chart(notebook_id, paragraph_id, query_doc, y_vars, x_var):
config = {
"results": {
"0": {
"graph": {
"mode": "lineChart",
"height": 300.0,
"optionOpen": 'true',
"keys": [{
"name": "i0_time",
"index": 1.0,
"aggr": "sum"
}],
"values": [{
"name": "i0_value",
"index": 0.0,
"aggr": "sum"
}],
"groups": []
},
"helium": {}
}
}
}
x_index = -1
y_index = -1
counter = 0
for from_i in query_doc['from']:
for select_i in from_i['select']:
if select_i['name'] == x_var:
x_index = counter
if select_i['name'] == y_vars:
y_index = counter
counter += 1
x_config_list = []
x_config = dict()
x_config['name'] = x_var
x_config['index'] = float(x_index)
x_config['aggr'] = 'sum'
x_config_list.append(x_config)
config["results"]["0"]["graph"]["keys"] = x_config_list
y_config_list = []
y_config = dict()
y_config['name'] = y_vars
y_config['index'] = float(y_index)
y_config['aggr'] = 'sum'
y_config_list.append(y_config)
config["results"]["0"]["graph"]["values"] = y_config_list
str_config = json.dumps(config)
response = requests.put(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id) + "/config", data=str_config)
print response.json()
def set_zep_paragraph_bar_chart(notebook_id, paragraph_id, query_doc, y_vars, x_var):
config = {
"results": {
"0": {
"graph": {
"mode": "multiBarChart",
"height": 300.0,
"optionOpen": 'true',
"keys": [{
"name": "i0_time",
"index": 1.0,
"aggr": "sum"
}],
"values": [{
"name": "i0_value",
"index": 0.0,
"aggr": "sum"
}],
"groups": []
},
"helium": {}
}
}
}
x_index = -1
y_index = -1
counter = 0
for from_i in query_doc['from']:
for select_i in from_i['select']:
if select_i['name'] == x_var:
x_index = counter
if select_i['name'] == y_vars:
y_index = counter
counter += 1
x_config_list = []
x_config = dict()
x_config['name'] = x_var
x_config['index'] = float(x_index)
x_config['aggr'] = 'sum'
x_config_list.append(x_config)
config["results"]["0"]["graph"]["keys"] = x_config_list
y_config_list = []
y_config = dict()
y_config['name'] = y_vars
y_config['index'] = float(y_index)
y_config['aggr'] = 'sum'
y_config_list.append(y_config)
config["results"]["0"]["graph"]["values"] = y_config_list
str_config = json.dumps(config)
response = requests.put(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id) + "/config", data=str_config)
print response.json()
def set_zep_paragraph_area_chart(notebook_id, paragraph_id, query_doc, y_vars, x_var):
config = {
"results": {
"0": {
"graph": {
"mode": "stackedAreaChart",
"height": 300.0,
"optionOpen": 'true',
"keys": [{
"name": "i0_time",
"index": 1.0,
"aggr": "sum"
}],
"values": [{
"name": "i0_value",
"index": 0.0,
"aggr": "sum"
}],
"groups": []
},
"helium": {}
}
}
}
x_index = -1
y_index = -1
counter = 0
for from_i in query_doc['from']:
for select_i in from_i['select']:
if select_i['name'] == x_var:
x_index = counter
if select_i['name'] == y_vars:
y_index = counter
counter += 1
x_config_list = []
x_config = dict()
x_config['name'] = x_var
x_config['index'] = float(x_index)
x_config['aggr'] = 'sum'
x_config_list.append(x_config)
config["results"]["0"]["graph"]["keys"] = x_config_list
y_config_list = []
y_config = dict()
y_config['name'] = y_vars
y_config['index'] = float(y_index)
y_config['aggr'] = 'sum'
y_config_list.append(y_config)
config["results"]["0"]["graph"]["values"] = y_config_list
str_config = json.dumps(config)
response = requests.put(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id) + "/config", data=str_config)
print response.json()
def set_zep_paragraph_scatter_chart(notebook_id, paragraph_id, query_doc, y_vars, x_var):
config = {
"results": {
"0": {
"graph": {
"mode": "scatterChart",
"height": 300.0,
"optionOpen": 'true',
"keys": [
{
"index": 3,
"name": "i0_location_latitude",
"aggr": "sum"
}
],
"values": [
{
"index": 0,
"name": "i0_value",
"aggr": "sum"
}
],
"groups": [],
"setting": {
"scatterChart": {
"yAxis": {
"name": "i0_value",
"index": 0,
"aggr": "sum"
},
"xAxis": {
"name": "i0_location_latitude",
"index": 3,
"aggr": "sum"
}
}
}
},
"helium": {}
}
}
}
x_index = -1
y_index = -1
counter = 0
for from_i in query_doc['from']:
for select_i in from_i['select']:
if select_i['name'] == x_var:
x_index = counter
if select_i['name'] == y_vars:
y_index = counter
counter += 1
x_config_list = []
x_config = dict()
x_config['name'] = x_var
x_config['index'] = float(x_index)
x_config['aggr'] = 'sum'
x_config_list.append(x_config)
config["results"]["0"]["graph"]["keys"] = x_config_list
config["results"]["0"]["graph"]["setting"]["scatterChart"]["xAxis"] = x_config_list[0]
y_config_list = []
y_config = dict()
y_config['name'] = y_vars
y_config['index'] = float(y_index)
y_config['aggr'] = 'sum'
y_config_list.append(y_config)
config["results"]["0"]["graph"]["values"] = y_config_list
config["results"]["0"]["graph"]["setting"]["scatterChart"]["yAxis"] = y_config_list[0]
str_config = json.dumps(config)
response = requests.put(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id) + "/config", data=str_config)
print response.json()
def set_zep_paragraph_pie_chart(notebook_id, paragraph_id, query_doc, value_vars, key_var):
config = {
"results": {
"0": {
"graph": {
"mode": "pieChart",
"height": 300.0,
"optionOpen": 'true',
"keys": [{
"name": "i0_time",
"index": 1.0,
"aggr": "sum"
}],
"values": [{
"name": "i0_value",
"index": 0.0,
"aggr": "sum"
}],
"groups": []
},
"helium": {}
}
}
}
x_index = -1
y_index = -1
counter = 0
for from_i in query_doc['from']:
for select_i in from_i['select']:
if select_i['name'] == key_var:
x_index = counter
if select_i['name'] == value_vars:
y_index = counter
counter += 1
x_config_list = []
x_config = dict()
x_config['name'] = key_var
x_config['index'] = float(x_index)
x_config['aggr'] = 'sum'
x_config_list.append(x_config)
config["results"]["0"]["graph"]["keys"] = x_config_list
y_config_list = []
y_config = dict()
y_config['name'] = value_vars
y_config['index'] = float(y_index)
y_config['aggr'] = 'sum'
y_config_list.append(y_config)
config["results"]["0"]["graph"]["values"] = y_config_list
str_config = json.dumps(config)
response = requests.put(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/" + str(paragraph_id) + "/config", data=str_config)
print response.json()
def restart_zep_interpreter(interpreter_id):
print "RESTARTING INTERPRETER!!!"
    interpreter_id = settings.ZEPPELIN_SPARK_INTERPRETER
response = requests.put(settings.ZEPPELIN_URL+"/api/interpreter/setting/restart/"+interpreter_id)
print response
def create_zep_drop_all_paragraph(notebook_id, title):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark.pyspark' \
'\nsqlContext.dropTempTable("tempTablePostgres")' \
'\ndf=None'
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = response.json()
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_toJSON_paragraph(notebook_id, title, df_name, order_by='', order_type='ASC'):
data = dict()
data['title'] = 'bdo_test_paragraph'
if order_by != '':
if order_type == 'ASC':
data['text'] = '%spark.pyspark' \
'\n{0}.orderBy("{1}", ascending=True).toJSON().collect()'.format(df_name, order_by)
else:
data['text'] = '%spark.pyspark' \
'\n{0}.orderBy("{1}", ascending=False).toJSON().collect()'.format(df_name, order_by)
else:
data['text'] = '%spark.pyspark' \
'\n{0}.toJSON().collect()'.format(df_name)
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = convert_unicode_json(response.json())
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_tempView_paragraph(notebook_id, title, df_name):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark.pyspark' \
'\n{0}.createTempView("{0}_scala")'.format(df_name)
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = convert_unicode_json(response.json())
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_scala_histogram_paragraph(notebook_id, title, df_name, hist_col, num_of_bins):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark' \
'\nval {0}_scala = spark.table("{0}_scala")' \
'\nval (startValues,counts) = {0}_scala.select("{1}").map(value => value.getDouble(0)).rdd.histogram({2})' \
'\nval {0}_scala = sc.parallelize(startValues zip counts).toDF("startValues","counts").withColumn("startValues", round($"startValues", 3))' \
'\nspark.sqlContext.dropTempTable("{0}_scala")'.format(df_name, hist_col, num_of_bins)
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = convert_unicode_json(response.json())
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def create_zep_scala_toJSON_paragraph(notebook_id, title, df_name):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark' \
'\n{0}_scala.orderBy("startValues").toJSON.collect'.format(df_name)
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = convert_unicode_json(response.json())
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def get_zep_scala_toJSON_paragraph_response(notebook_id, paragraph_id):
print "request: "+settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/"+str(paragraph_id)
response = requests.get(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/"+str(paragraph_id))
print response
response_json = convert_unicode_json(response.json())
re = '[' + str(response_json['body']['results']['msg'][0]['data'].split('Array(')[1].split(')\n')[0]) + ']'
json_data = json.loads(re)
json_data = convert_unicode_json(json_data)
return json_data
def get_zep_toJSON_paragraph_response(notebook_id, paragraph_id):
print "request: "+settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/"+str(paragraph_id)
response = requests.get(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/"+str(paragraph_id))
print "get_zep_toJSON_paragraph_response:"
print response
# import pdb
# pdb.set_trace()
response_json = convert_unicode_json(response.json())
json_data = json.loads(str(response_json['body']['results']['msg'][0]['data']).strip().replace("u'{", "{").replace("}'", "}").replace("'", '"'))
print json_data[:3]
print type(json_data)
json_data = convert_unicode_json(json_data)
print json_data[:3]
# print type(json_data)
return json_data
def create_zep_getDict_paragraph(notebook_id, title, dict_name):
data = dict()
data['title'] = 'bdo_test_paragraph'
data['text'] = '%spark.pyspark' \
'\nprint {0}'.format(dict_name)
str_data = json.dumps(data)
response = requests.post(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph", data=str_data)
print response
response_json = convert_unicode_json(response.json())
paragraph_id = response_json['body']
print paragraph_id
return paragraph_id
def get_zep_getDict_paragraph_response(notebook_id, paragraph_id):
response = requests.get(settings.ZEPPELIN_URL+"/api/notebook/" + str(notebook_id) + "/paragraph/"+str(paragraph_id))
print response
response_json = convert_unicode_json(response.json())
json_data = json.loads(str(response_json['body']['results']['msg'][0]['data']).strip().replace("u'{", "{").replace("}'", "}").replace("'", '"'))
json_data = convert_unicode_json(json_data)
return json_data
def create_livy_session(notebook_id):
print 'looking for livy session'
host = settings.LIVY_URL
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
data = { 'kind': 'pyspark',
'jars': ['/user/livy/jars/postgresql-42.2.2.jar', '/user/livy/jars/presto-jdbc-0.213.jar'],
'driverMemory': '2g',
'driverCores': 2,
'numExecutors': 1,
'executorMemory': '2g',
'executorCores': 2,
# 'heartbeatTimeoutInSecond': 120,
'conf': {'spark.driver.maxResultSize': '2g'}}
response = requests.post(host + '/sessions', data=json.dumps(data), headers=headers).json()
# print response
sessions = requests.get(host + '/sessions', headers=headers).json()['sessions']
ids_states = [(int(s['id']),s['state'] ) for s in sessions]
print 'session ids'
print ids_states
cnt=0
session_id = -1
print 'looking for session in list'
for (id, state) in ids_states:
cnt += 1
if len(ServiceInstance.objects.filter(livy_session=id)) == 0:
if state == 'starting' or state == 'idle':
serviceInstance = ServiceInstance.objects.get(notebook_id=notebook_id)
serviceInstance.livy_session = id
serviceInstance.save()
session_id = id
break
print 'found session?'
print session_id
response = requests.post(host + '/sessions', data=json.dumps(data), headers=headers).json()
if session_id == -1:
try:
session_id = response['id']
except Exception:
raise Exception('Failed')
try:
state = ''
while state != 'idle':
state = requests.get(host + '/sessions/' + str(session_id) + '/state', headers=headers).json()['state']
if state == 'error' or state == 'dead':
raise Exception('Failed')
except Exception:
raise Exception('Failed')
return session_id
def create_livy_query_statement(session_id, raw_query):
host = 'http://bdo-dev.epu.ntua.gr:8998'
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
raw_query = '(SELECT * FROM (SELECT * from wind_speed_11) AS SQ1 LIMIT 1000000) AS tmp'
data = dict()
data['code'] = 'df = spark.read.format("jdbc")' \
'.option("url", "jdbc:postgresql://localhost:5432/bdo_platform?user=postgres&password=bdo!")' \
'.option("driver", "org.postgresql.Driver")' \
'.option("database", "bdo_platform")' \
'.option("dbtable", "' + str(raw_query).replace("\n", " ") + '")' \
'.load()' \
'\n#df.show()'
statements_url = host + '/sessions/{0}/statements'.format(session_id)
response = requests.post(statements_url, data=json.dumps(data), headers=headers).json()
print response
try:
statement_id = response['id']
state = ''
while state != 'available':
state = requests.get(host + '/sessions/' + str(session_id) + '/statements/' + str(statement_id), headers=headers).json()['state']
if state == 'error' or state == 'cancelling' or state == 'cancelled':
raise Exception('Failed')
except Exception:
raise Exception('Failed')
return statement_id
def create_livy_toJSON_paragraph(session_id, df_name, order_by='', order_type='ASC'):
host = settings.LIVY_URL
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
data = dict()
if order_by != '':
if order_type == 'ASC':
data['code'] = '{0}.orderBy("{1}", ascending=True).toJSON().collect()'.format(df_name, order_by)
else:
data['code'] = '{0}.orderBy("{1}", ascending=False).toJSON().collect()'.format(df_name, order_by)
else:
data['code'] = '{0}.toJSON().collect()'.format(df_name)
statements_url = host + '/sessions/{0}/statements'.format(session_id)
response = requests.post(statements_url, data=json.dumps(data), headers=headers).json()
print response
try:
statement_id = response['id']
state = ''
while state != 'available':
response = requests.get(host + '/sessions/' + str(session_id) + '/statements/' + str(statement_id), headers=headers).json()
state = response['state']
if state == 'error' or state == 'cancelling' or state == 'cancelled':
raise Exception('Failed')
except Exception:
raise Exception('Failed')
return json.loads(response['output']['data']['text/plain'].replace("\'","'").replace("u'{", "{").replace("}',", "},").replace("}']", "}]").replace("'", '"'))
def create_livy_scala_toJSON_paragraph(session_id, df_name):
host = settings.LIVY_URL
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
data = dict()
data['code'] = '{0}_scala.orderBy("startValues").toJSON.collect'.format(df_name)
data['kind'] = 'spark'
statements_url = host + '/sessions/{0}/statements'.format(session_id)
response = requests.post(statements_url, data=json.dumps(data), headers=headers).json()
print response
try:
statement_id = response['id']
state = ''
while state != 'available':
response = requests.get(host + '/sessions/' + str(session_id) + '/statements/' + str(statement_id), headers=headers).json()
state = response['state']
if state == 'error' or state == 'cancelling' or state == 'cancelled':
raise Exception('Failed')
except Exception:
raise Exception('Failed')
return_val = '[' + str(convert_unicode_json(response['output']['data']['text/plain'].replace("u'{", "{").replace("}',", "},").replace("}']", "}]").replace("'", '"').split('Array(')[1].replace('})', '}').replace('\n', ''))) + ']'
return json.loads(return_val)
def get_result_dict_from_livy(session_id, dict_name):
host = settings.LIVY_URL
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
data = dict()
data['code'] = '\nprint {0}'.format(dict_name)
data['kind'] = 'pyspark'
statements_url = host + '/sessions/{0}/statements'.format(session_id)
response = requests.post(statements_url, data=json.dumps(data), headers=headers).json()
print response
try:
statement_id = response['id']
state = ''
from time import sleep
sleep(3) # Time in seconds.
while state != 'available':
response = requests.get(host + '/sessions/' + str(session_id) + '/statements/' + str(statement_id), headers=headers).json()
state = response['state']
if state == 'error' or state == 'cancelling' or state == 'cancelled':
raise Exception('Failed')
except Exception:
raise Exception('Failed')
print 'result'
print str(response['output']['data'])
return_val = json.loads(str(convert_unicode_json(response['output']['data']['text/plain'])).replace("'", '"'))
print return_val
return return_val
def close_livy_session(session_id):
headers = {'X-Requested-By': 'Admin'}
requests.delete("{0}/sessions/{1}".format(settings.LIVY_URL, session_id), headers=headers)
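# Illustrative sketch (not part of the original module): the typical Livy round
# trip used by the helpers above -- open a session for a notebook, run a small
# pyspark statement, read a result dict back and close the session. The
# notebook id is a placeholder and must match an existing ServiceInstance.
def _demo_livy_round_trip(notebook_id='EXAMPLE_NOTE_ID'):
    """Runs a trivial pyspark statement on Livy and returns its result dict."""
    session_id = create_livy_session(notebook_id)
    try:
        execute_code_on_livy('result = {"status": "ok"}', session_id, 'pyspark')
        return get_result_dict_from_livy(session_id, 'result')
    finally:
        close_livy_session(session_id)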
| mit |
chrisjsewell/ipymd | ipymd/data_output.py | 1 | 5382 | # -*- coding: utf-8 -*-
"""
Created on Mon May 23 17:55:14 2016
@author: cjs14
"""
import os
import numpy as np
import datetime
from ._version import __version__
class Data_Output(object):
"""
"""
def __init__(self, atom_df, abc, origin=np.zeros(3)):
self._atom_df = atom_df.copy()
self._abc = np.array(abc)
self._origin = np.array(origin)
def _save_xyz(self, outpath='out.xyz', overwrite=False,
header=''):
if os.path.exists(outpath) and not overwrite:
raise IOError('file already exists; {0}'.format(outpath))
raise NotImplementedError
def _save_gromacs(self, outpath='out.gro', overwrite=False,
header=''):
if os.path.exists(outpath) and not overwrite:
raise IOError('file already exists; {0}'.format(outpath))
raise NotImplementedError
def save_lammps(self, outpath='out.lammps', overwrite=False,
atom_type='atomic', header='', mass_map={}):
""" to adhere to http://lammps.sandia.gov/doc/read_data.html?highlight=read_data
Parameters
----------
outpath : string
the output file name
overwrite : bool
whether to raise an error if the file already exists
atom_type : str
the lammps atom style, currently supports atomic or charge
header : str
text to put in the header
mass_map : dict
a mapping of atom types to mass
Example
-------
In [1]: import pandas as pd
In [2]: df = pd.DataFrame([['Fe',2,3,4,1],
['Cr',2,3,3,-1],
                        ['Fe',4,3,1,1]],columns=['type','x','y','z','q'])
In [3]: from ipymd.data_output import Data_Output as data_out
In [4]: data = data_out(df, [[1,0,0],[0,1,0],[0,0,1]])
In [5]: data.save_lammps('test.lammps', atom_type='charge', overwrite=True,
header='my header')
In [6]: cat test.lammps
# This file was created by ipymd (v0.0.1) on 2016-05-23 20:51:16
# type map: {'Cr': 2, 'Fe': 1}
# my header
3 atoms
2 atom types
# simulation box boundaries
0.0000 1.0000 xlo xhi
0.0000 1.0000 ylo yhi
0.0000 1.0000 zlo zhi
0.0000 0.0000 0.0000 xy xz yz
Atoms
1 1 1.0000 2.0000 3.0000 4.0000
2 2 -1.0000 2.0000 3.0000 3.0000
3 1 1.0000 4.0000 3.0000 1.0000
"""
if os.path.exists(outpath) and not overwrite:
raise IOError('file already exists; {0}'.format(outpath))
assert atom_type in ['atomic', 'charge']
xlo, ylo, zlo = self._origin
a, b, c = self._abc
xhi = a[0] + xlo
xy = b[0]
yhi = b[1] + ylo
xz = c[0]
yz = c[1]
zhi = c[2] + zlo
#xlo_bound = xlo + min(0.0,xy,xz,xy+xz)
#xhi_bound = xhi + max(0.0,xy,xz,xy+xz)
#ylo_bound = ylo + min(0.0,yz)
#yhi_bound = yhi + max(0.0,yz)
#zlo_bound = zlo
#zhi_bound = zhi
num_atoms = self._atom_df.shape[0]
types = self._atom_df['type'].unique()
num_types = len(types)
type_map = dict(zip(types, [i+1 for i in range(len(types))]))
if mass_map:
assert sorted(mass_map.keys()) == sorted(type_map.keys())
with open(outpath, 'w+') as f:
# header comments
f.write('# This file was created by ipymd (v{0}) on {1} \n'.format(
__version__, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
f.write('# type map: {0} \n'.format(type_map))
f.write('# {0} \n'.format(header))
# header
f.write('\n')
f.write('{0} atoms \n'.format(num_atoms))
f.write('{0} atom types \n'.format(num_types))
f.write('\n')
f.write('# simulation box boundaries\n')
f.write('{0:.4f} {1:.4f} xlo xhi \n'.format(xlo, xhi))
f.write('{0:.4f} {1:.4f} ylo yhi \n'.format(ylo, yhi))
f.write('{0:.4f} {1:.4f} zlo zhi \n'.format(zlo, zhi))
            f.write('{0:.4f} {1:.4f} {2:.4f} xy xz yz \n'.format(xy, xz, yz))
f.write('\n')
if mass_map:
f.write('Masses\n')
f.write('\n')
for atype, mass in mass_map.iteritems():
f.write('{0} {1:.4f}\n'.format(type_map[atype],mass))
f.write('\n')
# body
f.write('Atoms \n')
f.write('\n')
for i, (ix, s) in enumerate(self._atom_df.iterrows()):
if atom_type == 'atomic':
                    f.write('{0} {1} {2:.4f} {3:.4f} {4:.4f} \n'.format(
                        i+1, type_map[s.type], *s[['x','y','z']].values))
elif atom_type == 'charge':
f.write('{0} {1} {2:.4f} {3:.4f} {4:.4f} {5:.4f} \n'.format(
i+1, type_map[s.type], *s[['q','x','y','z']].values))
| gpl-3.0 |
rgerkin/inspyred | inspyred/ec/observers.py | 2 | 18199 | """
================================================
:mod:`observers` -- Algorithm monitoring methods
================================================
This module provides pre-defined observers for evolutionary computations.
All observer functions have the following arguments:
- *population* -- the population of Individuals
- *num_generations* -- the number of elapsed generations
- *num_evaluations* -- the number of candidate solution evaluations
- *args* -- a dictionary of keyword arguments
.. note::
The *population* is really a shallow copy of the actual population of
the evolutionary computation. This means that any activities like
sorting will not affect the actual population.
.. Copyright 2012 Inspired Intelligence Initiative
.. This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.. You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
.. module:: observers
.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com>
"""
import email
import inspyred
import math
import os
import smtplib
import time
def default_observer(population, num_generations, num_evaluations, args):
"""Do nothing."""
pass
def best_observer(population, num_generations, num_evaluations, args):
"""Print the best individual in the population to the screen.
This function displays the best individual in the population to
the screen.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
"""
print("Best Individual: {0}\n".format(str(max(population))))
def stats_observer(population, num_generations, num_evaluations, args):
"""Print the statistics of the evolutionary computation to the screen.
This function displays the statistics of the evolutionary computation
to the screen. The output includes the generation number, the current
number of evaluations, the maximum fitness, the minimum fitness,
the average fitness, and the standard deviation.
.. note::
This function makes use of the ``inspyred.ec.analysis.fitness_statistics``
function, so it is subject to the same requirements.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
"""
stats = inspyred.ec.analysis.fitness_statistics(population)
worst_fit = '{0:>10}'.format(stats['worst'])[:10]
best_fit = '{0:>10}'.format(stats['best'])[:10]
avg_fit = '{0:>10}'.format(stats['mean'])[:10]
med_fit = '{0:>10}'.format(stats['median'])[:10]
std_fit = '{0:>10}'.format(stats['std'])[:10]
print('Generation Evaluation Worst Best Median Average Std Dev')
print('---------- ---------- ---------- ---------- ---------- ---------- ----------')
print('{0:>10} {1:>10} {2:>10} {3:>10} {4:>10} {5:>10} {6:>10}\n'.format(num_generations,
num_evaluations,
worst_fit,
best_fit,
med_fit,
avg_fit,
std_fit))
def population_observer(population, num_generations, num_evaluations, args):
"""Print the current population of the evolutionary computation to the screen.
This function displays the current population of the evolutionary
computation to the screen in fitness-sorted order.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
"""
population.sort(reverse=True)
print('----------------------------------------------------------------------------')
print(' Current Population')
print('----------------------------------------------------------------------------')
for ind in population:
print(str(ind))
print('----------------------------------------------------------------------------')
def file_observer(population, num_generations, num_evaluations, args):
"""Print the output of the evolutionary computation to a file.
This function saves the results of the evolutionary computation
to two files. The first file, which by default is named
'inspyred-statistics-file-<timestamp>.csv', contains the basic
generational statistics of the population throughout the run
(worst, best, median, and average fitness and standard deviation
of the fitness values). The second file, which by default is named
'inspyred-individuals-file-<timestamp>.csv', contains every individual
during each generation of the run. Both files may be passed to the
function as keyword arguments (see below).
The format of each line of the statistics file is as follows::
generation number, population size, worst, best, median, average, standard deviation
The format of each line of the individuals file is as follows::
generation number, individual number, fitness, string representation of candidate
.. note::
This function makes use of the ``inspyred.ec.analysis.fitness_statistics``
function, so it is subject to the same requirements.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *statistics_file* -- a file object (default: see text)
- *individuals_file* -- a file object (default: see text)
"""
try:
statistics_file = args['statistics_file']
except KeyError:
statistics_file = open('inspyred-statistics-file-{0}.csv'.format(time.strftime('%m%d%Y-%H%M%S')), 'w')
args['statistics_file'] = statistics_file
try:
individuals_file = args['individuals_file']
except KeyError:
individuals_file = open('inspyred-individuals-file-{0}.csv'.format(time.strftime('%m%d%Y-%H%M%S')), 'w')
args['individuals_file'] = individuals_file
stats = inspyred.ec.analysis.fitness_statistics(population)
worst_fit = stats['worst']
best_fit = stats['best']
avg_fit = stats['mean']
med_fit = stats['median']
std_fit = stats['std']
statistics_file.write('{0}, {1}, {2}, {3}, {4}, {5}, {6}\n'.format(num_generations, len(population), worst_fit, best_fit, med_fit, avg_fit, std_fit))
for i, p in enumerate(population):
individuals_file.write('{0}, {1}, {2}, {3}\n'.format(num_generations, i, p.fitness, str(p.candidate)))
statistics_file.flush()
individuals_file.flush()
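# Sketch of directing file_observer's output to specific files: keyword
# arguments passed to evolve() are forwarded to the observer through ``args``,
# which is where the file objects above are looked up (``ea``, ``generator``
# and ``evaluator`` are assumed to be supplied by the caller).
def example_file_observer_usage(ea, generator, evaluator):
    ea.observer = file_observer
    return ea.evolve(generator=generator, evaluator=evaluator,
                     pop_size=100, max_evaluations=5000,
                     statistics_file=open('my_stats.csv', 'w'),
                     individuals_file=open('my_individuals.csv', 'w'))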
def archive_observer(population, num_generations, num_evaluations, args):
"""Print the current archive to the screen.
This function displays the current archive of the evolutionary
computation to the screen.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
"""
archive = args['_ec'].archive
print('----------------------------------------------------------------------------')
print(' Archive ({0:5} individuals)'.format(len(archive)))
print('----------------------------------------------------------------------------')
for a in archive:
print(a)
print('----------------------------------------------------------------------------')
class EmailObserver(object):
"""Email the population statistics, individuals, and optional file observer data.
This callable class allows information about the current generation
to be emailed to a user. This is useful when dealing with computationally
expensive optimization problems where the evolution must progress over
hours or days. The ``generation_step`` attribute can be set to an integer
greater than 1 to ensure that emails are only sent on generations that are
multiples of the step size.
.. note::
This function makes use of the ``inspyred.ec.analysis.fitness_statistics``
function, so it is subject to the same requirements.
A typical instantiation of this class would be the following::
import getpass
usr = raw_input("Enter your username: ")
pwd = getpass.getpass("Enter your password: ")
email_observer = EmailObserver(usr, pwd, "my.mail.server")
email_observer.from_address = "me@here.com"
email_observer.to_address = "you@there.com" # or ["you@there.com", "other@somewhere.com"]
email_observer.subject = "My custom subject"
email_observer.generation_step = 10 # Send an email every 10th generation
Public Attributes:
- *username* -- the mail server username
- *password* -- the mail server password
- *server* -- the mail server URL or IP address string
- *port* -- the mail server port as an integer
- *from_address* -- the email address of the sender
- *to_address* -- the (possibly list of) email address(es) of the receiver(s)
- *subject* -- the subject of the email (default 'inspyred observer report')
- *max_attachment* -- the maximum allowable size, in MB, of attachments
(default 20 MB)
- *generation_step* -- the step size for when a generation's information
should be emailed (default 1)
"""
def __init__(self, username, password, server, port=587):
self.username = username
self.password = password
self.server = server
self.port = port
self.generation_step = 1
self.max_attachment = 20
self.subject = "inspyred observer report"
self.__name__ = self.__class__.__name__
def _send_mail(self, fromaddr, toaddr, subject, text, attachments=None):
if not isinstance(toaddr, (list, tuple)):
toaddr = [toaddr]
msg = email.MIMEMultipart.MIMEMultipart('related')
msg['From'] = fromaddr
msg['To'] = ','.join(toaddr)
msg['Subject'] = subject
body = email.MIMEMultipart.MIMEMultipart('alternative')
body.attach(email.MIMEText.MIMEText(text, 'plain'))
html = '<html><body><tt>{0}</tt></body></html>'.format(text.replace(' ', '&nbsp;').replace('\n', '<br/>'))
body.attach(email.MIMEText.MIMEText(html, 'html'))
msg.attach(body)
if attachments is not None:
if not isinstance(attachments, (list, tuple)):
attachments = [attachments]
for file in attachments:
part = email.MIMEBase.MIMEBase('application', 'octet-stream')
fp = open(file, 'rb')
part.set_payload(fp.read())
fp.close()
email.Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="{0}"'.format(os.path.basename(file)))
msg.attach(part)
mail_server = smtplib.SMTP(self.server, self.port)
mail_server.ehlo()
mail_server.starttls()
mail_server.ehlo()
mail_server.login(self.username, self.password)
mail_server.sendmail(fromaddr, toaddr, msg.as_string())
mail_server.quit()
def __call__(self, population, num_generations, num_evaluations, args):
if num_generations % self.generation_step == 0:
stats = inspyred.ec.analysis.fitness_statistics(population)
worst_fit = '{0:>10}'.format(stats['worst'])[:10]
best_fit = '{0:>10}'.format(stats['best'])[:10]
avg_fit = '{0:>10}'.format(stats['mean'])[:10]
med_fit = '{0:>10}'.format(stats['median'])[:10]
std_fit = '{0:>10}'.format(stats['std'])[:10]
body = 'Generation Evaluation Worst Best Median Average Std Dev\n'
body += '---------- ---------- ---------- ---------- ---------- ---------- ----------\n'
body += '{0:>10} {1:>10} {2:>10} {3:>10} {4:>10} {5:>10} {6:>10}\n'.format(num_generations,
num_evaluations,
worst_fit,
best_fit,
med_fit,
avg_fit,
std_fit)
body += '----------------------------------------------------------------------------\n'
for p in population:
body += str(p) + '\n'
body += '----------------------------------------------------------------------------\n'
total_size = 0
files = []
stats = args.get("statistics_file", None)
inds = args.get("individuals_file", None)
for file in [stats, inds]:
if file is not None:
files.append(file.name)
total_size += os.path.getsize(file.name)
if total_size > (self.max_attachment * 1048576):
files = None
self._send_mail(self.from_address, self.to_address, self.subject, body, files)
def plot_observer(population, num_generations, num_evaluations, args):
"""Plot the output of the evolutionary computation as a graph.
This function plots the performance of the EC as a line graph
using the pylab library (matplotlib) and numpy. The graph consists of a
blue line representing the best fitness, a green line representing
the average fitness, and a red line representing the median fitness.
It modifies the keyword arguments variable 'args' by including an
entry called 'plot_data'.
If this observer is used, the calling script should also import
the pylab library and should end the script with::
pylab.show()
Otherwise, the program may generate a runtime error.
.. note::
This function makes use of the pylab and numpy libraries.
.. Arguments:
population -- the population of Individuals
num_generations -- the number of elapsed generations
num_evaluations -- the number of candidate solution evaluations
args -- a dictionary of keyword arguments
"""
import pylab
import numpy
stats = inspyred.ec.analysis.fitness_statistics(population)
best_fitness = stats['best']
worst_fitness = stats['worst']
median_fitness = stats['median']
average_fitness = stats['mean']
colors = ['black', 'blue', 'green', 'red']
labels = ['average', 'median', 'best', 'worst']
data = []
if num_generations == 0:
pylab.ion()
data = [[num_evaluations], [average_fitness], [median_fitness], [best_fitness], [worst_fitness]]
lines = []
for i in range(4):
line, = pylab.plot(data[0], data[i+1], color=colors[i], label=labels[i])
lines.append(line)
# Add the legend when the first data is added.
pylab.legend(loc='lower right')
args['plot_data'] = data
args['plot_lines'] = lines
pylab.xlabel('Evaluations')
pylab.ylabel('Fitness')
else:
data = args['plot_data']
data[0].append(num_evaluations)
data[1].append(average_fitness)
data[2].append(median_fitness)
data[3].append(best_fitness)
data[4].append(worst_fitness)
lines = args['plot_lines']
for i, line in enumerate(lines):
line.set_xdata(numpy.array(data[0]))
line.set_ydata(numpy.array(data[i+1]))
args['plot_data'] = data
args['plot_lines'] = lines
ymin = min([min(d) for d in data[1:]])
ymax = max([max(d) for d in data[1:]])
yrange = ymax - ymin
pylab.xlim((0, num_evaluations))
pylab.ylim((ymin - 0.1*yrange, ymax + 0.1*yrange))
pylab.draw()
| gpl-3.0 |
altairpearl/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 64 | 3706 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
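# Illustrative sketch (an assumption about the internals, for intuition only,
# not part of the original example): the ``alpha`` passed to
# GaussianProcessRegressor in the noisy case below acts as a Tikhonov/ridge
# term added to the diagonal of the training covariance matrix.
def _tikhonov_sketch(kernel_fn, X_train, y_train, alpha):
    K = kernel_fn(X_train)                # covariance between training points
    K[np.diag_indices_from(K)] += alpha   # per-point regularization term
    return np.linalg.solve(K, y_train)    # dual weights behind the predictive mean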
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
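def _brute_force_partial_dependence(est, X, feature, grid):
    # Illustrative sketch (not the library implementation, added for clarity):
    # partial dependence of a fitted regressor on one feature, computed by
    # clamping that feature to each grid value and averaging the predictions.
    X_mod = np.array(X, dtype=float)
    values = []
    for value in grid:
        X_mod[:, feature] = value
        values.append(est.predict(X_mod).mean())
    return np.array(values)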
def test_partial_dependency_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
WuShichao/computational-physics | 2/2_11/2_11.py | 1 | 2915 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 17:48:55 2016
初始速度的微小变动和微风对弹道的影响
@author: nightwing
"""
from math import cos,sin,sqrt,pi
import matplotlib.pyplot as plt
g = 9.8 # gravitational acceleration (m/s^2)
dt = 0.01 # time step (s)
v0 = 700.0 # initial speed (m/s)
v_wind = 25/9.0 # wind speed (m/s), i.e. 10 km/h
k = 4*10**(-5) # B2/m (m^-1)
y0 = 10e4 # y0 = kb*T/(m*g) (m)
a = 6.5*10**(-3) # K/m
alpha = 2.5 # exponent
T0 = 300 # temperature at sea level (K)
trajectory = [] # this list stores the computed trajectories
#------- Euler method: compute the cannon shell trajectories --------
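# Model being integrated below (a sketch for clarity; matches the adiabatic
# density approximation used in the code): with rho/rho0 = (1 - a*y/T0)**alpha
# and v = sqrt(vx**2 + vy**2),
#     dvx/dt = -(B2/m) * (rho/rho0) * v * vx
#     dvy/dt = -g - (B2/m) * (rho/rho0) * v * vy
# Each loop applies the Euler step x += vx*dt, y += vy*dt and the corresponding
# velocity updates; in the wind cases the drag uses the velocity of the shell
# relative to the air (vx - v_wind for a tailwind, vx + v_wind for a headwind).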
theta = 45
theta *= (pi/180)
#V0=700m/s
t = 0
x = 0.0
y = 0.0
displacement_x = []
displacement_y = []
vx = v0 * cos(theta)
vy = v0 * sin(theta)
while y >= 0:
displacement_x.append(x/1000)
displacement_y.append(y/1000)
v = sqrt(vx**2 + vy**2)
x += vx * dt
y += vy * dt
vx -= k*(1-a*y/T0)**alpha*v*vx*dt
vy -= (g+k*(1-a*y/T0)**alpha*v*vy) * dt
t += dt
trajectory.append([displacement_x,displacement_y])
#V0 changes 1%
t = 0
x = 0.0
y = 0.0
displacement_x = []
displacement_y = []
vx = v0 * 1.01 * cos(theta)
vy = v0 * 1.01 * sin(theta)
while y >= 0:
displacement_x.append(x/1000)
displacement_y.append(y/1000)
v = sqrt(vx**2 + vy**2)
x += vx * dt
y += vy * dt
vx -= k*(1-a*y/T0)**alpha*v*vx*dt
vy -= (g+k*(1-a*y/T0)**alpha*v*vy) * dt
t += dt
trajectory.append([displacement_x,displacement_y])
#V0=700m/s with a slightly (10km/h) tailwind
t = 0
x = 0.0
y = 0.0
displacement_x = []
displacement_y = []
vx = v0 * cos(theta)
vy = v0 * sin(theta)
while y >= 0:
displacement_x.append(x/1000)
displacement_y.append(y/1000)
v = sqrt(vx**2 + vy**2)
x += vx * dt
y += vy * dt
vx -= k*(1-a*y/T0)**alpha*abs(v-v_wind)*(vx-v_wind)*dt
vy -= (g+k*(1-a*y/T0)**alpha*abs(v-v_wind)*vy) * dt
t += dt
trajectory.append([displacement_x,displacement_y])
#V0=700m/s with a slightly (10km/h) headwind
t = 0
x = 0.0
y = 0.0
displacement_x = []
displacement_y = []
vx = v0 * cos(theta)
vy = v0 * sin(theta)
while y >= 0:
displacement_x.append(x/1000)
displacement_y.append(y/1000)
v = sqrt(vx**2 + vy**2)
x += vx * dt
y += vy * dt
vx -= k*(1-a*y/T0)**alpha*abs(v+v_wind)*(vx+v_wind)*dt
vy -= (g+k*(1-a*y/T0)**alpha*abs(v+v_wind)*vy) * dt
t += dt
trajectory.append([displacement_x,displacement_y])
#------------------ plotting ---------------------
plt.title("Trajectory of cannon shell")
plt.xlabel("x (km)")
plt.ylabel("y (km)")
plt.plot(trajectory[0][0],trajectory[0][1],"k--",label="no wind")
plt.plot(trajectory[1][0],trajectory[1][1],"k-",label="V0 changes 1%")
plt.plot(trajectory[2][0],trajectory[2][1],"g--",label="tailwind")
plt.plot(trajectory[3][0],trajectory[3][1],"g-",label="headwind")
plt.xlim(0,30)
plt.ylim(0,10)
plt.legend()
plt.show() | gpl-3.0 |
pnedunuri/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
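def topic_distribution_for_text(model, vectorizer, text):
    # Illustrative helper (not used below): project a new document onto the
    # topic space of a fitted NMF or LDA model with the matching vectorizer.
    return model.transform(vectorizer.transform([text]))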
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
elkingtonmcb/shogun | examples/undocumented/python_modular/graphical/group_lasso.py | 26 | 7792 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import rand, randn, permutation, multivariate_normal
from modshogun import BinaryLabels, RealFeatures, IndexBlock, IndexBlockGroup, FeatureBlockLogisticRegression
def generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd):
# Generates synthetic data for the logistic regression, using the example
# from [Friedman10]
# n : # of observations
# p : # of predictors
# L : # of blocks
# blk_nnz : # of non-zero coefs. in each block
# gcov : correlation within groups
# nstd : standard deviation of the added noise
# size of each block (assumed to be an integer)
pl = p / L
# generating the coefficients (betas)
coefs = np.zeros((p, 1))
for (i, nnz) in enumerate(blk_nnz):
blkcoefs = np.zeros((pl, 1))
blkcoefs[0:nnz] = np.sign(rand(nnz, 1) - 0.5)
coefs[pl * i:pl * (i + 1)] = permutation(blkcoefs)
# generating the predictors
mu = np.zeros(p)
gsigma = gcov * np.ones((pl, pl))
np.fill_diagonal(gsigma, 1.0)
Sigma = np.kron(np.eye(L), gsigma)
# the predictors come from a standard Gaussian multivariate distribution
X = multivariate_normal(mu, Sigma, n)
# linear function of the explanatory variables in X, plus noise
t = np.dot(X, coefs) + randn(n, 1) * nstd
# applying the logit
Pr = 1 / (1 + np.exp(-t))
# The response variable y[i] is a Bernoulli random variable taking
# value 1 with probability Pr[i]
y = rand(n, 1) <= Pr
# we want each _column_ in X to represent a feature vector
# y and coefs should be also 1D arrays
return X.T, y.flatten(), coefs.flatten()
def misclassified_groups(est_coefs, true_coefs, L):
# Compute the number of groups that are misclassified, i.e. the ones with
# at least one non-zero coefficient whose estimated coefficients are all
# set to zero, or viceversa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
# L : number of blocks
p = est_coefs.shape[0] # number of predictors
pl = p / L
est_nz = est_coefs != 0
true_nz = true_coefs != 0
est_blk_nzcount = np.array([sum(est_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
true_blk_nzcount = np.array([sum(true_nz[pl * i:pl * (i + 1)]) for i in xrange(L)])
return np.sum(np.logical_xor(est_blk_nzcount == 0, true_blk_nzcount == 0))
def misclassified_features(est_coefs, true_coefs):
# Compute the number of individual coefficients that are misclassified,
# i.e. estimated to be zero when the true coefficient is nonzero or
# vice-versa, as explained in [Friedman10]
# est_coefs : coefficients estimated by the FBLR
# true_coefs : the original coefficients of our synthetic example
return np.sum(np.logical_xor(est_coefs == 0, true_coefs == 0))
def compute_misclassifications(cls, true_coefs, L, rel_z):
# Try the given classifier with different values of relative regularization
# parameters, store the coefficients and compute the number of groups
# and features misclassified.
# INPUTS:
# - cls : the classifier to try
# - true_coefs : the original coefficients of our synthetic example
# - L : number of blocks
# - rel_z : regularization values to try, they will be in [0,1]
# OUTPUTS:
# - est_coefs : array with the estimated coefficients, each row for a
# different value of regularization
# - misc_groups, misc_feats : see above
num_z = rel_z.shape[0]
est_coefs = np.zeros((num_z, true_coefs.shape[0]))
misc_groups = np.zeros(num_z)
misc_feats = np.zeros(num_z)
for (i, z) in enumerate(rel_z):
cls.set_z(z)
cls.train()
est_coefs[i, :] = cls.get_w()
misc_groups[i] = misclassified_groups(est_coefs[i, :], true_coefs, L)
misc_feats[i] = misclassified_features(est_coefs[i, :], true_coefs)
return est_coefs, misc_groups, misc_feats
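def group_lasso_penalty_sketch(coefs, L, z):
    # Illustrative sketch (not part of the Shogun API): value of the group lasso
    # penalty z * sum_g ||w_g||_2 for L equally sized, contiguous blocks; with
    # blocks of size one this reduces to the plain lasso penalty z * ||w||_1.
    pl = coefs.shape[0] / L
    return z * sum(np.sqrt(np.sum(coefs[pl * g:pl * (g + 1)] ** 2))
                   for g in xrange(L))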
if __name__ == '__main__':
print('FeatureBlockLogisticRegression example')
np.random.seed(956) # reproducible results
# default parameters from [Friedman10]
n = 200
p = 100
L = 10
blk_nnz = [10, 8, 6, 4, 2, 1]
gcov = 0.2
nstd = 0.4
# range of (relative) regularization values to try
min_z = 0
max_z = 1
num_z = 21
# get the data
X, y, true_coefs = generate_synthetic_logistic_data(n, p, L, blk_nnz, gcov, nstd)
# here each column represents a feature vector
features = RealFeatures(X)
# we have to convert the labels to +1/-1
labels = BinaryLabels(np.sign(y.astype(int) - 0.5))
# SETTING UP THE CLASSIFIERS
# CLASSIFIER 1: group LASSO
# build the feature blocks and add them to the block group
pl = p / L
block_group = IndexBlockGroup()
for i in xrange(L):
block_group.add_block(IndexBlock(pl * i, pl * (i + 1)))
cls_gl = FeatureBlockLogisticRegression(0.0, features, labels, block_group)
# with set_regularization(1), the parameter z will indicate the fraction of
# the maximum regularization to use, and so z is in [0,1]
# (reference: SLEP manual)
cls_gl.set_regularization(1)
cls_gl.set_q(2.0) # it is the default anyway...
# CLASSIFIER 2: LASSO (illustrating group lasso with all group sizes = 1)
block_group_ones = IndexBlockGroup()
for i in xrange(p):
block_group_ones.add_block(IndexBlock(i, i + 1))
cls_l = FeatureBlockLogisticRegression(0.0, features, labels, block_group_ones)
cls_l.set_regularization(1)
cls_l.set_q(2.0)
# trying with different values of (relative) regularization parameters
rel_z = np.linspace(min_z, max_z, num_z)
coefs_gl, miscgp_gl, miscft_gl = compute_misclassifications(cls_gl, true_coefs, L, rel_z)
coefs_l, miscgp_l, miscft_l = compute_misclassifications(cls_l, true_coefs, L, rel_z)
# Find the best regularization for each classifier
# for the group lasso: the one that gives the fewest groups misclassified
best_z_gl = np.argmin(miscgp_gl)
# for the lasso: the one that gives the fewest features misclassified
best_z_l = np.argmin(miscft_l)
# plot the true coefs. and the signs of the estimated coefs.
fig = plt.figure()
for (coefs, best_z, name, pos) in zip([coefs_gl, coefs_l], [best_z_gl, best_z_l], ['Group lasso', 'Lasso'], [0, 1]):
ax = plt.subplot2grid((4, 2), (pos, 0), colspan=2)
plt.hold(True)
plt.plot(xrange(p), np.sign(coefs[best_z, :]), 'o', markeredgecolor='none', markerfacecolor='g')
plt.plot(xrange(p), true_coefs, '^', markersize=7, markeredgecolor='r', markerfacecolor='none', markeredgewidth=1)
plt.xticks(xrange(0, p + pl, pl))
plt.yticks([-1, 0, 1])
plt.xlim((-1, p + 1))
plt.ylim((-2, 2))
plt.grid(True)
# plt.legend(('estimated', 'true'), loc='best')
plt.title(name)
plt.xlabel('Predictor [triangles=true coefs], best reg. value = %.2f' % rel_z[best_z])
plt.ylabel('Coefficient')
ax = plt.subplot2grid((4, 2), (2, 0), rowspan=2)
plt.plot(rel_z, miscgp_gl, 'ro-', rel_z, miscgp_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Groups misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of groups misclassified')
ax = plt.subplot2grid((4, 2), (2, 1), rowspan=2)
plt.plot(rel_z, miscft_gl, 'ro-', rel_z, miscft_l, 'bo-')
plt.legend(('Group lasso', 'Lasso'), loc='best')
plt.title('Features misclassified')
plt.xlabel('Relative regularization parameter')
plt.ylabel('# of features misclassified')
plt.tight_layout(1.2, 0, 0)
plt.show()
| gpl-3.0 |
lakehanne/ensenso | ensenso_detect/manikin/utils/capgen.py | 1 | 56722 | '''
Source code for an attention based image caption generation system described
in:
Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
International Conference for Machine Learning (2015)
http://arxiv.org/abs/1502.03044
Comments in square brackets [] indicate references to the equations/
more detailed explanations in the above paper.
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import numpy
import copy
import os
import time
from collections import OrderedDict
from sklearn.cross_validation import KFold
import warnings
# [see Section (4.3) for explanation]
from homogeneous_data import HomogeneousData
# supported optimizers
from optimizers import adadelta, adam, rmsprop, sgd
# dataset iterators
import flickr8k
import flickr30k
import coco
# datasets: 'name', 'load_data: returns iterator', 'prepare_data: some preprocessing'
datasets = {'flickr8k': (flickr8k.load_data, flickr8k.prepare_data),
'flickr30k': (flickr30k.load_data, flickr30k.prepare_data),
'coco': (coco.load_data, coco.prepare_data)}
def get_dataset(name):
return datasets[name][0], datasets[name][1]
'''
Theano uses shared variables for parameters, so to
make this code more portable, these two functions
push and pull variables between a shared
variable dictionary and a regular numpy
dictionary
'''
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout in theano
def dropout_layer(state_before, use_noise, trng):
"""
tensor switch is like an if statement that checks the
value of the theano shared variable (use_noise), before
either dropping out the state_before tensor or
computing the appropriate activation. During training/testing
use_noise is toggled on and off.
"""
proj = tensor.switch(use_noise,
state_before *
trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype),
state_before * 0.5)
return proj
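# Usage note (sketch): ``use_noise`` is a theano shared variable, so dropout is
# switched on with use_noise.set_value(1.) before training updates and switched
# off with use_noise.set_value(0.) for validation or sampling.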
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
raise Warning('%s is not in the archive' % kk)
params[kk] = pp[kk]
return params
# some utilities
def ortho_weight(ndim):
"""
Random orthogonal weights
Used by norm_weight (below), in which case we
are ensuring that the rows are orthogonal
(i.e W = U \Sigma V, U has the same
# of rows, V has the same # of cols)
"""
W = numpy.random.randn(ndim, ndim)
u, _, _ = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin,nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
# some useful shorthands
def tanh(x):
return tensor.tanh(x)
def rectifier(x):
return tensor.maximum(0., x)
def linear(x):
return x
"""
Neural network layer definitions.
The life-cycle of each of these layers is as follows
1) The param_init of the layer is called, which creates
the weights of the network.
2) The fprop is called which builds that part of the Theano graph
using the weights created in step 1). This automatically links
these variables to the graph.
Each prefix is used like a key and should be unique
to avoid naming conflicts when building the graph.
"""
# layers: 'name': ('parameter initializer', 'fprop')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'lstm': ('param_init_lstm', 'lstm_layer'),
'lstm_cond': ('param_init_lstm_cond', 'lstm_cond_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None):
if nin is None:
nin = options['dim_proj']
if nout is None:
nout = options['dim_proj']
params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01)
params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv', activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(tensor.dot(state_below, tparams[_p(prefix,'W')])+tparams[_p(prefix,'b')])
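# Illustrative sketch of the two-step life-cycle described above, using the
# feedforward layer as the example (not called anywhere; ``options`` and
# ``state_below`` are assumed to be provided by the caller):
def _layer_lifecycle_sketch(options, state_below):
    params = param_init_fflayer(options, OrderedDict(), prefix='ff_example',
                                nin=options['dim'], nout=options['dim'])
    tparams = init_tparams(params)
    return fflayer(tparams, state_below, options, prefix='ff_example',
                   activ='lambda x: tensor.tanh(x)')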
# LSTM layer
def param_init_lstm(options, params, prefix='lstm', nin=None, dim=None):
if nin is None:
nin = options['dim_proj']
if dim is None:
dim = options['dim_proj']
"""
Stack the weight matricies for all the gates
for much cleaner code and slightly faster dot-prods
"""
# input weights
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim),
norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
# for the previous hidden activation
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim),
ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
params[_p(prefix,'b')] = numpy.zeros((4 * dim,)).astype('float32')
return params
# This function implements the lstm fprop
def lstm_layer(tparams, state_below, options, prefix='lstm', mask=None, **kwargs):
nsteps = state_below.shape[0]
dim = tparams[_p(prefix,'U')].shape[0]
# if we are dealing with a mini-batch
if state_below.ndim == 3:
n_samples = state_below.shape[1]
init_state = tensor.alloc(0., n_samples, dim)
init_memory = tensor.alloc(0., n_samples, dim)
# during sampling
else:
n_samples = 1
init_state = tensor.alloc(0., dim)
init_memory = tensor.alloc(0., dim)
# if we have no mask, we assume all the inputs are valid
if mask == None:
mask = tensor.alloc(1., state_below.shape[0], 1)
# use the slice to calculate all the different gates
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
elif _x.ndim == 2:
return _x[:, n*dim:(n+1)*dim]
return _x[n*dim:(n+1)*dim]
# one time step of the lstm
def _step(m_, x_, h_, c_):
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
i = tensor.nnet.sigmoid(_slice(preact, 0, dim))
f = tensor.nnet.sigmoid(_slice(preact, 1, dim))
o = tensor.nnet.sigmoid(_slice(preact, 2, dim))
c = tensor.tanh(_slice(preact, 3, dim))
c = f * c_ + i * c
h = o * tensor.tanh(c)
return h, c, i, f, o, preact
state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
rval, updates = theano.scan(_step,
sequences=[mask, state_below],
outputs_info=[init_state, init_memory, None, None, None, None],
name=_p(prefix, '_layers'),
n_steps=nsteps, profile=False)
return rval
# Conditional LSTM layer with Attention
def param_init_lstm_cond(options, params, prefix='lstm_cond', nin=None, dim=None, dimctx=None):
if nin is None:
nin = options['dim']
if dim is None:
dim = options['dim']
if dimctx is None:
dimctx = options['dim']
# input to LSTM, similar to the above, we stack the matrices for compactness, do one
# dot product, and use the slice function below to get the activations for each "gate"
W = numpy.concatenate([norm_weight(nin,dim),
norm_weight(nin,dim),
norm_weight(nin,dim),
norm_weight(nin,dim)], axis=1)
params[_p(prefix,'W')] = W
# LSTM to LSTM
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim),
ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix,'U')] = U
# bias to LSTM
params[_p(prefix,'b')] = numpy.zeros((4 * dim,)).astype('float32')
# context to LSTM
Wc = norm_weight(dimctx,dim*4)
params[_p(prefix,'Wc')] = Wc
# attention: context -> hidden
Wc_att = norm_weight(dimctx, ortho=False)
params[_p(prefix,'Wc_att')] = Wc_att
# attention: LSTM -> hidden
Wd_att = norm_weight(dim,dimctx)
params[_p(prefix,'Wd_att')] = Wd_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix,'b_att')] = b_att
# optional "deep" attention
if options['n_layers_att'] > 1:
for lidx in xrange(1, options['n_layers_att']):
params[_p(prefix,'W_att_%d'%lidx)] = ortho_weight(dimctx)
params[_p(prefix,'b_att_%d'%lidx)] = numpy.zeros((dimctx,)).astype('float32')
# attention:
U_att = norm_weight(dimctx,1)
params[_p(prefix,'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
if options['selector']:
# attention: selector
W_sel = norm_weight(dim, 1)
params[_p(prefix, 'W_sel')] = W_sel
b_sel = numpy.float32(0.)
params[_p(prefix, 'b_sel')] = b_sel
return params
def lstm_cond_layer(tparams, state_below, options, prefix='lstm',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
trng=None, use_noise=None, sampling=True,
argmax=False, **kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_memory, 'previous memory must be provided'
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask is None:
mask = tensor.alloc(1., state_below.shape[0], 1)
# infer lstm dimension
dim = tparams[_p(prefix, 'U')].shape[0]
# initial/previous state
if init_state is None:
init_state = tensor.alloc(0., n_samples, dim)
# initial/previous memory
if init_memory is None:
init_memory = tensor.alloc(0., n_samples, dim)
# projected context
pctx_ = tensor.dot(context, tparams[_p(prefix,'Wc_att')]) + tparams[_p(prefix, 'b_att')]
if options['n_layers_att'] > 1:
for lidx in xrange(1, options['n_layers_att']):
pctx_ = tensor.dot(pctx_, tparams[_p(prefix,'W_att_%d'%lidx)])+tparams[_p(prefix, 'b_att_%d'%lidx)]
# note to self: this used to be options['n_layers_att'] - 1, so no extra non-linearity if n_layers_att < 3
if lidx < options['n_layers_att']:
pctx_ = tanh(pctx_)
# projected x
# state_below is timesteps*num samples by d in training (TODO change to notation of paper)
# this is n * d during sampling
state_below = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
# additional parameters for stochastic hard attention
if options['attn_type'] == 'stochastic':
# temperature for softmax
temperature = options.get("temperature", 1)
# [see (Section 4.1): Stochastic "Hard" Attention]
semi_sampling_p = options.get("semi_sampling_p", 0.5)
temperature_c = theano.shared(numpy.float32(temperature), name='temperature_c')
h_sampling_mask = trng.binomial((1,), p=semi_sampling_p, n=1, dtype=theano.config.floatX).sum()
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n*dim:(n+1)*dim]
return _x[:, n*dim:(n+1)*dim]
def _step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_=None, dp_att_=None):
""" Each variable is one time slice of the LSTM
m_ - (mask), x_- (previous word), h_- (hidden state), c_- (lstm memory),
a_ - (alpha distribution [eq (5)]), as_- (sample from alpha dist), ct_- (context),
pctx_ (projected context), dp_/dp_att_ (dropout masks)
"""
# attention computation
# [described in equations (4), (5), (6) in
# section "3.1.2 Decoder: Long Short Term Memory Network]
pstate_ = tensor.dot(h_, tparams[_p(prefix,'Wd_att')])
pctx_ = pctx_ + pstate_[:,None,:]
pctx_list = []
pctx_list.append(pctx_)
pctx_ = tanh(pctx_)
alpha = tensor.dot(pctx_, tparams[_p(prefix,'U_att')])+tparams[_p(prefix, 'c_tt')]
alpha_pre = alpha
alpha_shp = alpha.shape
if options['attn_type'] == 'deterministic':
alpha = tensor.nnet.softmax(alpha.reshape([alpha_shp[0],alpha_shp[1]])) # softmax
ctx_ = (context * alpha[:,:,None]).sum(1) # current context
alpha_sample = alpha # you can return something else reasonable here to debug
else:
alpha = tensor.nnet.softmax(temperature_c*alpha.reshape([alpha_shp[0],alpha_shp[1]])) # softmax
# TODO return alpha_sample
if sampling:
alpha_sample = h_sampling_mask * trng.multinomial(pvals=alpha,dtype=theano.config.floatX)\
+ (1.-h_sampling_mask) * alpha
else:
if argmax:
alpha_sample = tensor.cast(tensor.eq(tensor.arange(alpha_shp[1])[None,:],
tensor.argmax(alpha,axis=1,keepdims=True)), theano.config.floatX)
else:
alpha_sample = alpha
ctx_ = (context * alpha_sample[:,:,None]).sum(1) # current context
if options['selector']:
sel_ = tensor.nnet.sigmoid(tensor.dot(h_, tparams[_p(prefix, 'W_sel')])+tparams[_p(prefix,'b_sel')])
sel_ = sel_.reshape([sel_.shape[0]])
ctx_ = sel_[:,None] * ctx_
preact = tensor.dot(h_, tparams[_p(prefix, 'U')])
preact += x_
preact += tensor.dot(ctx_, tparams[_p(prefix, 'Wc')])
# Recover the activations to the lstm gates
# [equation (1)]
i = _slice(preact, 0, dim)
f = _slice(preact, 1, dim)
o = _slice(preact, 2, dim)
if options['use_dropout_lstm']:
i = i * _slice(dp_, 0, dim)
f = f * _slice(dp_, 1, dim)
o = o * _slice(dp_, 2, dim)
i = tensor.nnet.sigmoid(i)
f = tensor.nnet.sigmoid(f)
o = tensor.nnet.sigmoid(o)
c = tensor.tanh(_slice(preact, 3, dim))
# compute the new memory/hidden state
# if the mask is 0, just copy the previous state
c = f * c_ + i * c
c = m_[:,None] * c + (1. - m_)[:,None] * c_
h = o * tensor.tanh(c)
h = m_[:,None] * h + (1. - m_)[:,None] * h_
rval = [h, c, alpha, alpha_sample, ctx_]
if options['selector']:
rval += [sel_]
rval += [pstate_, pctx_, i, f, o, preact, alpha_pre]+pctx_list
return rval
if options['use_dropout_lstm']:
if options['selector']:
_step0 = lambda m_, x_, dp_, h_, c_, a_, as_, ct_, sel_, pctx_: \
_step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_)
else:
_step0 = lambda m_, x_, dp_, h_, c_, a_, as_, ct_, pctx_: \
_step(m_, x_, h_, c_, a_, as_, ct_, pctx_, dp_)
dp_shape = state_below.shape
if one_step:
dp_mask = tensor.switch(use_noise,
trng.binomial((dp_shape[0], 3*dim),
p=0.5, n=1, dtype=state_below.dtype),
tensor.alloc(0.5, dp_shape[0], 3 * dim))
else:
dp_mask = tensor.switch(use_noise,
trng.binomial((dp_shape[0], dp_shape[1], 3*dim),
p=0.5, n=1, dtype=state_below.dtype),
tensor.alloc(0.5, dp_shape[0], dp_shape[1], 3*dim))
else:
if options['selector']:
_step0 = lambda m_, x_, h_, c_, a_, as_, ct_, sel_, pctx_: _step(m_, x_, h_, c_, a_, as_, ct_, pctx_)
else:
_step0 = lambda m_, x_, h_, c_, a_, as_, ct_, pctx_: _step(m_, x_, h_, c_, a_, as_, ct_, pctx_)
if one_step:
if options['use_dropout_lstm']:
if options['selector']:
rval = _step0(mask, state_below, dp_mask, init_state, init_memory, None, None, None, None, pctx_)
else:
rval = _step0(mask, state_below, dp_mask, init_state, init_memory, None, None, None, pctx_)
else:
if options['selector']:
rval = _step0(mask, state_below, init_state, init_memory, None, None, None, None, pctx_)
else:
rval = _step0(mask, state_below, init_state, init_memory, None, None, None, pctx_)
return rval
else:
seqs = [mask, state_below]
if options['use_dropout_lstm']:
seqs += [dp_mask]
outputs_info = [init_state,
init_memory,
tensor.alloc(0., n_samples, pctx_.shape[1]),
tensor.alloc(0., n_samples, pctx_.shape[1]),
tensor.alloc(0., n_samples, context.shape[2])]
if options['selector']:
outputs_info += [tensor.alloc(0., n_samples)]
outputs_info += [None,
None,
None,
None,
None,
None,
None] + [None] # *options['n_layers_att']
rval, updates = theano.scan(_step0,
sequences=seqs,
outputs_info=outputs_info,
non_sequences=[pctx_],
name=_p(prefix, '_layers'),
n_steps=nsteps, profile=False)
return rval, updates
# parameter initialization
# [roughly in the same order as presented in section 3.1.2]
def init_params(options):
params = OrderedDict()
# embedding: [matrix E in paper]
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
ctx_dim = options['ctx_dim']
if options['lstm_encoder']: # potential feature that runs an LSTM over the annotation vectors
# encoder: LSTM
params = get_layer('lstm')[0](options, params, prefix='encoder',
nin=options['ctx_dim'], dim=options['dim'])
params = get_layer('lstm')[0](options, params, prefix='encoder_rev',
nin=options['ctx_dim'], dim=options['dim'])
ctx_dim = options['dim'] * 2
# init_state, init_cell: [top right on page 4]
for lidx in xrange(1, options['n_layers_init']):
params = get_layer('ff')[0](options, params, prefix='ff_init_%d'%lidx, nin=ctx_dim, nout=ctx_dim)
params = get_layer('ff')[0](options, params, prefix='ff_state', nin=ctx_dim, nout=options['dim'])
params = get_layer('ff')[0](options, params, prefix='ff_memory', nin=ctx_dim, nout=options['dim'])
# decoder: LSTM: [equation (1)/(2)/(3)]
params = get_layer('lstm_cond')[0](options, params, prefix='decoder',
nin=options['dim_word'], dim=options['dim'],
dimctx=ctx_dim)
# potentially deep decoder (warning: should work but somewhat untested)
if options['n_layers_lstm'] > 1:
for lidx in xrange(1, options['n_layers_lstm']):
params = get_layer('ff')[0](options, params, prefix='ff_state_%d'%lidx, nin=options['ctx_dim'], nout=options['dim'])
params = get_layer('ff')[0](options, params, prefix='ff_memory_%d'%lidx, nin=options['ctx_dim'], nout=options['dim'])
params = get_layer('lstm_cond')[0](options, params, prefix='decoder_%d'%lidx,
nin=options['dim'], dim=options['dim'],
dimctx=ctx_dim)
# readout: [equation (7)]
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm', nin=options['dim'], nout=options['dim_word'])
if options['ctx2out']:
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx', nin=ctx_dim, nout=options['dim_word'])
if options['n_layers_out'] > 1:
for lidx in xrange(1, options['n_layers_out']):
params = get_layer('ff')[0](options, params, prefix='ff_logit_h%d'%lidx, nin=options['dim_word'], nout=options['dim_word'])
params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim_word'], nout=options['n_words'])
return params
# build a training model
def build_model(tparams, options, sampling=True):
""" Builds the entire computational graph used for training
[This function builds a model described in Section 3.1.2 onwards
as the convolutional feature are precomputed, some extra features
which were not used are also implemented here.]
Parameters
----------
tparams : OrderedDict
maps names of variables to theano shared variables
options : dict
big dictionary with all the settings and hyperparameters
sampling : boolean
[If it is true, when using stochastic attention, follows
the learning rule described in section 4. at the bottom left of
page 5]
Returns
-------
trng: theano random number generator
Used for dropout, stochastic attention, etc
use_noise: theano shared variable
flag that toggles noise on and off
[x, mask, ctx]: theano variables
Represent the captions, binary mask, and annotations
for a single batch (see dimensions below)
alphas: theano variables
Attention weights
alpha_sample: theano variable
Sampled attention weights used in REINFORCE for stochastic
attention: [see the learning rule in eq (12)]
cost: theano variable
negative log likelihood
opt_outs: OrderedDict
extra outputs required depending on configuration in options
"""
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples,
x = tensor.matrix('x', dtype='int64')
mask = tensor.matrix('mask', dtype='float32')
# context: #samples x #annotations x dim
ctx = tensor.tensor3('ctx', dtype='float32')
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# index into the word embedding matrix, shift it forward in time
emb = tparams['Wemb'][x.flatten()].reshape([n_timesteps, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
if options['lstm_encoder']:
# encoder
ctx_fwd = get_layer('lstm')[1](tparams, ctx.dimshuffle(1,0,2),
options, prefix='encoder')[0].dimshuffle(1,0,2)
ctx_rev = get_layer('lstm')[1](tparams, ctx.dimshuffle(1,0,2)[:,::-1,:],
options, prefix='encoder_rev')[0][:,::-1,:].dimshuffle(1,0,2)
ctx0 = tensor.concatenate((ctx_fwd, ctx_rev), axis=2)
else:
ctx0 = ctx
# initial state/cell [top right on page 4]
ctx_mean = ctx0.mean(1)
for lidx in xrange(1, options['n_layers_init']):
ctx_mean = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_init_%d'%lidx, activ='rectifier')
if options['use_dropout']:
ctx_mean = dropout_layer(ctx_mean, use_noise, trng)
init_state = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state', activ='tanh')
init_memory = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory', activ='tanh')
# lstm decoder
# [equation (1), (2), (3) in section 3.1.2]
attn_updates = []
proj, updates = get_layer('lstm_cond')[1](tparams, emb, options,
prefix='decoder',
mask=mask, context=ctx0,
one_step=False,
init_state=init_state,
init_memory=init_memory,
trng=trng,
use_noise=use_noise,
sampling=sampling)
attn_updates += updates
proj_h = proj[0]
# optional deep attention
if options['n_layers_lstm'] > 1:
for lidx in xrange(1, options['n_layers_lstm']):
init_state = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state_%d'%lidx, activ='tanh')
init_memory = get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory_%d'%lidx, activ='tanh')
proj, updates = get_layer('lstm_cond')[1](tparams, proj_h, options,
prefix='decoder_%d'%lidx,
mask=mask, context=ctx0,
one_step=False,
init_state=init_state,
init_memory=init_memory,
trng=trng,
use_noise=use_noise,
sampling=sampling)
attn_updates += updates
proj_h = proj[0]
alphas = proj[2]
alpha_sample = proj[3]
ctxs = proj[4]
# [beta value explained in note 4.2.1 "doubly stochastic attention"]
if options['selector']:
sels = proj[5]
if options['use_dropout']:
proj_h = dropout_layer(proj_h, use_noise, trng)
# compute word probabilities
# [equation (7)]
logit = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
if options['prev2out']:
logit += emb
if options['ctx2out']:
logit += get_layer('ff')[1](tparams, ctxs, options, prefix='ff_logit_ctx', activ='linear')
logit = tanh(logit)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
if options['n_layers_out'] > 1:
for lidx in xrange(1, options['n_layers_out']):
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_h%d'%lidx, activ='rectifier')
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
# compute softmax
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0]*logit_shp[1], logit_shp[2]]))
# Index into the computed probability to give the log likelihood
x_flat = x.flatten()
p_flat = probs.flatten()
cost = -tensor.log(p_flat[tensor.arange(x_flat.shape[0])*probs.shape[1]+x_flat]+1e-8)
cost = cost.reshape([x.shape[0], x.shape[1]])
masked_cost = cost * mask
cost = (masked_cost).sum(0)
# optional outputs
opt_outs = dict()
if options['selector']:
opt_outs['selector'] = sels
if options['attn_type'] == 'stochastic':
opt_outs['masked_cost'] = masked_cost # need this for reinforce later
opt_outs['attn_updates'] = attn_updates # this is to update the rng
return trng, use_noise, [x, mask, ctx], alphas, alpha_sample, cost, opt_outs
# build a sampler
def build_sampler(tparams, options, use_noise, trng, sampling=True):
""" Builds a sampler used for generating from the model
Parameters
----------
See build_model function above
Returns
-------
f_init : theano function
Input: annotation, Output: initial lstm state and memory
(also performs transformation on ctx0 if using lstm_encoder)
f_next: theano function
Takes the previous word/state/memory + ctx0 and runs one
step through the lstm (used for beam search)
"""
# context: #annotations x dim
ctx = tensor.matrix('ctx_sampler', dtype='float32')
if options['lstm_encoder']:
# encoder
ctx_fwd = get_layer('lstm')[1](tparams, ctx,
options, prefix='encoder')[0]
ctx_rev = get_layer('lstm')[1](tparams, ctx[::-1,:],
options, prefix='encoder_rev')[0][::-1,:]
ctx = tensor.concatenate((ctx_fwd, ctx_rev), axis=1)
# initial state/cell
ctx_mean = ctx.mean(0)
for lidx in xrange(1, options['n_layers_init']):
ctx_mean = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_init_%d'%lidx, activ='rectifier')
if options['use_dropout']:
ctx_mean = dropout_layer(ctx_mean, use_noise, trng)
init_state = [get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state', activ='tanh')]
init_memory = [get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory', activ='tanh')]
if options['n_layers_lstm'] > 1:
for lidx in xrange(1, options['n_layers_lstm']):
init_state.append(get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_state_%d'%lidx, activ='tanh'))
init_memory.append(get_layer('ff')[1](tparams, ctx_mean, options, prefix='ff_memory_%d'%lidx, activ='tanh'))
print 'Building f_init...',
f_init = theano.function([ctx], [ctx]+init_state+init_memory, name='f_init', profile=False)
print 'Done'
# build f_next
ctx = tensor.matrix('ctx_sampler', dtype='float32')
x = tensor.vector('x_sampler', dtype='int64')
init_state = [tensor.matrix('init_state', dtype='float32')]
init_memory = [tensor.matrix('init_memory', dtype='float32')]
if options['n_layers_lstm'] > 1:
for lidx in xrange(1, options['n_layers_lstm']):
init_state.append(tensor.matrix('init_state', dtype='float32'))
init_memory.append(tensor.matrix('init_memory', dtype='float32'))
# for the first word (which is coded with -1), emb should be all zero
emb = tensor.switch(x[:,None] < 0, tensor.alloc(0., 1, tparams['Wemb'].shape[1]),
tparams['Wemb'][x])
proj = get_layer('lstm_cond')[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state[0],
init_memory=init_memory[0],
trng=trng,
use_noise=use_noise,
sampling=sampling)
next_state, next_memory, ctxs = [proj[0]], [proj[1]], [proj[4]]
proj_h = proj[0]
if options['n_layers_lstm'] > 1:
for lidx in xrange(1, options['n_layers_lstm']):
proj = get_layer('lstm_cond')[1](tparams, proj_h, options,
prefix='decoder_%d'%lidx,
context=ctx,
one_step=True,
init_state=init_state[lidx],
init_memory=init_memory[lidx],
trng=trng,
use_noise=use_noise,
sampling=sampling)
next_state.append(proj[0])
next_memory.append(proj[1])
ctxs.append(proj[4])
proj_h = proj[0]
if options['use_dropout']:
proj_h = dropout_layer(proj[0], use_noise, trng)
else:
proj_h = proj[0]
logit = get_layer('ff')[1](tparams, proj_h, options, prefix='ff_logit_lstm', activ='linear')
if options['prev2out']:
logit += emb
if options['ctx2out']:
logit += get_layer('ff')[1](tparams, ctxs[-1], options, prefix='ff_logit_ctx', activ='linear')
logit = tanh(logit)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
if options['n_layers_out'] > 1:
for lidx in xrange(1, options['n_layers_out']):
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_h%d'%lidx, activ='rectifier')
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='linear')
logit_shp = logit.shape
next_probs = tensor.nnet.softmax(logit)
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# next word probability
f_next = theano.function([x, ctx]+init_state+init_memory, [next_probs, next_sample]+next_state+next_memory, name='f_next', profile=False)
return f_init, f_next
# generate sample
def gen_sample(tparams, f_init, f_next, ctx0, options,
trng=None, k=1, maxlen=30, stochastic=False):
"""Generate captions with beam search.
This function uses the beam search algorithm to conditionally
generate candidate captions. Supports beamsearch and stochastic
sampling.
Parameters
----------
tparams : OrderedDict()
dictionary of theano shared variables represented weight
matricies
f_init : theano function
input: annotation, output: initial lstm state and memory
(also performs transformation on ctx0 if using lstm_encoder)
f_next: theano function
takes the previous word/state/memory + ctx0 and runs one
step through the lstm
ctx0 : numpy array
annotation from convnet, of dimension #annotations x # dimension
[e.g (196 x 512)]
options : dict
dictionary of flags and options
trng : random number generator
k : int
size of beam search
maxlen : int
maximum allowed caption size
stochastic : bool
if True, sample stochastically
Returns
-------
sample : list of list
each sublist contains an (encoded) sample from the model
sample_score : numpy array
scores of each sample
"""
if k > 1:
assert not stochastic, 'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
hyp_memories = []
# only matters if we use lstm encoder
rval = f_init(ctx0)
ctx0 = rval[0]
next_state = []
next_memory = []
    # the states are returned with shape (dim,); reshape them to (1, dim)
for lidx in xrange(options['n_layers_lstm']):
next_state.append(rval[1+lidx])
next_state[-1] = next_state[-1].reshape([1, next_state[-1].shape[0]])
for lidx in xrange(options['n_layers_lstm']):
next_memory.append(rval[1+options['n_layers_lstm']+lidx])
next_memory[-1] = next_memory[-1].reshape([1, next_memory[-1].shape[0]])
# reminder: if next_w = -1, the switch statement
# in build_sampler is triggered -> (empty word embeddings)
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in xrange(maxlen):
# our "next" state/memory in our previous step is now our "initial" state and memory
rval = f_next(*([next_w, ctx0]+next_state+next_memory))
next_p = rval[0]
next_w = rval[1]
# extract all the states and memories
next_state = []
next_memory = []
for lidx in xrange(options['n_layers_lstm']):
next_state.append(rval[2+lidx])
next_memory.append(rval[2+options['n_layers_lstm']+lidx])
if stochastic:
            sample.append(next_w[0]) # with stochastic sampling this is easy
sample_score += next_p[0,next_w[0]]
if next_w[0] == 0:
break
else:
cand_scores = hyp_scores[:,None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k-dead_k)] # indices of the (k-dead_k) hypotheses with the lowest nll
voc_size = next_p.shape[1]
# indexing into the correct selected captions
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat] # extract costs of the top hypotheses
# a bunch of lists to hold future hypothesis
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
for lidx in xrange(options['n_layers_lstm']):
new_hyp_states.append([])
new_hyp_memories = []
for lidx in xrange(options['n_layers_lstm']):
new_hyp_memories.append([])
# get the corresponding hypothesis and append the predicted word
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx]) # copy in the cost of that hypothesis
for lidx in xrange(options['n_layers_lstm']):
new_hyp_states[lidx].append(copy.copy(next_state[lidx][ti]))
for lidx in xrange(options['n_layers_lstm']):
new_hyp_memories[lidx].append(copy.copy(next_memory[lidx][ti]))
# check the finished samples for <eos> character
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for lidx in xrange(options['n_layers_lstm']):
hyp_states.append([])
hyp_memories = []
for lidx in xrange(options['n_layers_lstm']):
hyp_memories.append([])
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1 # completed sample!
else:
                    new_live_k += 1 # collect the correct states/memories
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
for lidx in xrange(options['n_layers_lstm']):
hyp_states[lidx].append(new_hyp_states[lidx][idx])
for lidx in xrange(options['n_layers_lstm']):
hyp_memories[lidx].append(new_hyp_memories[lidx][idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = []
for lidx in xrange(options['n_layers_lstm']):
next_state.append(numpy.array(hyp_states[lidx]))
next_memory = []
for lidx in xrange(options['n_layers_lstm']):
next_memory.append(numpy.array(hyp_memories[lidx]))
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
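# A minimal usage sketch of the beam-search sampler (hypothetical, not part of
# the original script; the beam width k=5 and maxlen=30 mirror the values used
# in the training loop below, and word_idict is the index-to-word mapping built
# inside train()):
#
#   sample, score = gen_sample(tparams, f_init, f_next, ctx0, model_options,
#                              trng=trng, k=5, maxlen=30, stochastic=False)
#   best = sample[numpy.argmin(score)]   # lowest negative log likelihood
#   caption = ' '.join(word_idict[w] for w in best if w != 0)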
def pred_probs(f_log_probs, options, worddict, prepare_data, data, iterator, verbose=False):
""" Get log probabilities of captions
Parameters
----------
f_log_probs : theano function
        compute the log probability of a caption x given the context
options : dict
options dictionary
worddict : dict
maps words to one-hot encodings
prepare_data : function
see corresponding dataset class for details
data : numpy array
output of load_data, see corresponding dataset class
iterator : KFold
indices from scikit-learn KFold
verbose : boolean
if True print progress
Returns
-------
probs : numpy array
array of log probabilities indexed by example
"""
n_samples = len(data[0])
probs = numpy.zeros((n_samples, 1)).astype('float32')
n_done = 0
for _, valid_index in iterator:
x, mask, ctx = prepare_data([data[0][t] for t in valid_index],
data[1],
worddict,
maxlen=None,
n_words=options['n_words'])
pred_probs = f_log_probs(x,mask,ctx)
probs[valid_index] = pred_probs[:,None]
n_done += len(valid_index)
if verbose:
print '%d/%d samples computed'%(n_done,n_samples)
return probs
def validate_options(options):
# Put friendly reminders here
if options['dim_word'] > options['dim']:
warnings.warn('dim_word should only be as large as dim.')
if options['lstm_encoder']:
warnings.warn('Note that this is a 1-D bidirectional LSTM, not 2-D one.')
if options['use_dropout_lstm']:
warnings.warn('dropout in the lstm seems not to help')
# Other checks:
if options['attn_type'] not in ['stochastic', 'deterministic']:
raise ValueError("specified attention type is not correct")
return options
"""Note: all the hyperparameters are stored in a dictionary model_options (or options outside train).
train() then proceeds to do the following:
    1. The params are initialized (or reloaded)
    2. The computation graph is built symbolically using Theano.
    3. A cost is defined, then gradients are obtained automatically with tensor.grad :D
    4. With some helper functions, gradient descent + periodic saving/printing proceeds
"""
def train(dim_word=100, # word vector dimensionality
ctx_dim=512, # context vector dimensionality
dim=1000, # the number of LSTM units
attn_type='stochastic', # [see section 4 from paper]
n_layers_att=1, # number of layers used to compute the attention weights
n_layers_out=1, # number of layers used to compute logit
n_layers_lstm=1, # number of lstm layers
n_layers_init=1, # number of layers to initialize LSTM at time 0
lstm_encoder=False, # if True, run bidirectional LSTM on input units
prev2out=False, # Feed previous word into logit
ctx2out=False, # Feed attention weighted ctx into logit
alpha_entropy_c=0.002, # hard attn param
RL_sumCost=True, # hard attn param
semi_sampling_p=0.5, # hard attn param
temperature=1., # hard attn param
patience=10,
max_epochs=5000,
dispFreq=100,
decay_c=0., # weight decay coeff
alpha_c=0., # doubly stochastic coeff
lrate=0.01, # used only for SGD
selector=False, # selector (see paper)
n_words=10000, # vocab size
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size = 16,
valid_batch_size = 16,
saveto='model.npz', # relative path of saved model file
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq updates
dataset='flickr8k',
dictionary=None, # word dictionary
use_dropout=False, # setting this true turns on dropout at various points
use_dropout_lstm=False, # dropout on lstm gates
reload_=False,
save_per_epoch=False): # this saves down the model every epoch
# hyperparam dict
model_options = locals().copy()
model_options = validate_options(model_options)
# reload options
if reload_ and os.path.exists(saveto):
print "Reloading options"
with open('%s.pkl'%saveto, 'rb') as f:
model_options = pkl.load(f)
print "Using the following parameters:"
print model_options
print 'Loading data'
load_data, prepare_data = get_dataset(dataset)
train, valid, test, worddict = load_data()
# index 0 and 1 always code for the end of sentence and unknown token
word_idict = dict()
for kk, vv in worddict.iteritems():
word_idict[vv] = kk
word_idict[0] = '<eos>'
word_idict[1] = 'UNK'
# Initialize (or reload) the parameters using 'model_options'
# then build the Theano graph
print 'Building model'
params = init_params(model_options)
if reload_ and os.path.exists(saveto):
print "Reloading model"
params = load_params(saveto, params)
# numpy arrays -> theano shared variables
tparams = init_tparams(params)
# In order, we get:
# 1) trng - theano random number generator
# 2) use_noise - flag that turns on dropout
# 3) inps - inputs for f_grad_shared
# 4) cost - log likelihood for each sentence
# 5) opts_out - optional outputs (e.g selector)
trng, use_noise, \
inps, alphas, alphas_sample,\
cost, \
opt_outs = \
build_model(tparams, model_options)
# To sample, we use beam search: 1) f_init is a function that initializes
# the LSTM at time 0 [see top right of page 4], 2) f_next returns the distribution over
    # words and also the new "initial state/memory"
    print 'Building sampler'
f_init, f_next = build_sampler(tparams, model_options, use_noise, trng)
    # we want the cost without any of the regularizers
f_log_probs = theano.function(inps, -cost, profile=False,
updates=opt_outs['attn_updates']
if model_options['attn_type']=='stochastic'
else None)
cost = cost.mean()
# add L2 regularization costs
if decay_c > 0.:
decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
weight_decay = 0.
for kk, vv in tparams.iteritems():
weight_decay += (vv ** 2).sum()
weight_decay *= decay_c
cost += weight_decay
# Doubly stochastic regularization
if alpha_c > 0.:
alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
alpha_reg = alpha_c * ((1.-alphas.sum(0))**2).sum(0).mean()
cost += alpha_reg
hard_attn_updates = []
# Backprop!
if model_options['attn_type'] == 'deterministic':
grads = tensor.grad(cost, wrt=itemlist(tparams))
else:
# shared variables for hard attention
baseline_time = theano.shared(numpy.float32(0.), name='baseline_time')
opt_outs['baseline_time'] = baseline_time
alpha_entropy_c = theano.shared(numpy.float32(alpha_entropy_c), name='alpha_entropy_c')
alpha_entropy_reg = alpha_entropy_c * (alphas*tensor.log(alphas)).mean()
# [see Section 4.1: Stochastic "Hard" Attention for derivation of this learning rule]
if model_options['RL_sumCost']:
grads = tensor.grad(cost, wrt=itemlist(tparams),
disconnected_inputs='raise',
known_grads={alphas:(baseline_time-opt_outs['masked_cost'].mean(0))[None,:,None]/10.*
(-alphas_sample/alphas) + alpha_entropy_c*(tensor.log(alphas) + 1)})
else:
grads = tensor.grad(cost, wrt=itemlist(tparams),
disconnected_inputs='raise',
known_grads={alphas:opt_outs['masked_cost'][:,:,None]/10.*
(alphas_sample/alphas) + alpha_entropy_c*(tensor.log(alphas) + 1)})
# [equation on bottom left of page 5]
hard_attn_updates += [(baseline_time, baseline_time * 0.9 + 0.1 * opt_outs['masked_cost'].mean())]
# updates from scan
hard_attn_updates += opt_outs['attn_updates']
    # to get the cost after regularization or the gradients, use this
# f_cost = theano.function([x, mask, ctx], cost, profile=False)
# f_grad = theano.function([x, mask, ctx], grads, profile=False)
# f_grad_shared computes the cost and updates adaptive learning rate variables
# f_update updates the weights of the model
lr = tensor.scalar(name='lr')
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost, hard_attn_updates)
print 'Optimization'
# [See note in section 4.3 of paper]
train_iter = HomogeneousData(train, batch_size=batch_size, maxlen=maxlen)
if valid:
kf_valid = KFold(len(valid[0]), n_folds=len(valid[0])/valid_batch_size, shuffle=False)
if test:
kf_test = KFold(len(test[0]), n_folds=len(test[0])/valid_batch_size, shuffle=False)
# history_errs is a bare-bones training log that holds the validation and test error
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
history_errs = numpy.load(saveto)['history_errs'].tolist()
best_p = None
bad_counter = 0
if validFreq == -1:
validFreq = len(train[0])/batch_size
if saveFreq == -1:
saveFreq = len(train[0])/batch_size
if sampleFreq == -1:
sampleFreq = len(train[0])/batch_size
uidx = 0
estop = False
for eidx in xrange(max_epochs):
n_samples = 0
print 'Epoch ', eidx
for caps in train_iter:
n_samples += len(caps)
uidx += 1
# turn on dropout
use_noise.set_value(1.)
# preprocess the caption, recording the
# time spent to help detect bottlenecks
pd_start = time.time()
x, mask, ctx = prepare_data(caps,
train[1],
worddict,
maxlen=maxlen,
n_words=n_words)
pd_duration = time.time() - pd_start
if x is None:
print 'Minibatch with zero sample under length ', maxlen
continue
# get the cost for the minibatch, and update the weights
ud_start = time.time()
cost = f_grad_shared(x, mask, ctx)
f_update(lrate)
ud_duration = time.time() - ud_start # some monitoring for each mini-batch
# Numerical stability check
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
if numpy.mod(uidx, dispFreq) == 0:
print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'PD ', pd_duration, 'UD ', ud_duration
# Checkpoint
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving...',
if best_p is not None:
params = copy.copy(best_p)
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('%s.pkl'%saveto, 'wb'))
print 'Done'
# Print a generated sample as a sanity check
if numpy.mod(uidx, sampleFreq) == 0:
# turn off dropout first
use_noise.set_value(0.)
x_s = x
mask_s = mask
ctx_s = ctx
                # generate and decode a subset of the current training batch
for jj in xrange(numpy.minimum(10, len(caps))):
sample, score = gen_sample(tparams, f_init, f_next, ctx_s[jj], model_options,
trng=trng, k=5, maxlen=30, stochastic=False)
# Decode the sample from encoding back to words
print 'Truth ',jj,': ',
for vv in x_s[:,jj]:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
for kk, ss in enumerate([sample[0]]):
print 'Sample (', kk,') ', jj, ': ',
for vv in ss:
if vv == 0:
break
if vv in word_idict:
print word_idict[vv],
else:
print 'UNK',
print
# Log validation loss + checkpoint the model with the best validation log likelihood
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
train_err = 0
valid_err = 0
test_err = 0
if valid:
valid_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, valid, kf_valid).mean()
if test:
test_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, test, kf_test).mean()
history_errs.append([valid_err, test_err])
                # the model with the best validation log likelihood is saved separately with a different name
if uidx == 0 or valid_err <= numpy.array(history_errs)[:,0].min():
best_p = unzip(tparams)
print 'Saving model with best validation ll'
params = copy.copy(best_p)
params = unzip(tparams)
numpy.savez(saveto+'_bestll', history_errs=history_errs, **params)
bad_counter = 0
                # abort training if the validation error has stopped improving for too long
if eidx > patience and len(history_errs) > patience and valid_err >= numpy.array(history_errs)[:-patience,0].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
estop = True
break
print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
print 'Seen %d samples' % n_samples
if estop:
break
if save_per_epoch:
numpy.savez(saveto + '_epoch_' + str(eidx + 1), history_errs=history_errs, **unzip(tparams))
# use the best nll parameters for final checkpoint (if they exist)
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
train_err = 0
valid_err = 0
test_err = 0
if valid:
valid_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, valid, kf_valid)
if test:
test_err = -pred_probs(f_log_probs, model_options, worddict, prepare_data, test, kf_test)
print 'Train ', train_err, 'Valid ', valid_err, 'Test ', test_err
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p, train_err=train_err,
valid_err=valid_err, test_err=test_err, history_errs=history_errs,
**params)
return train_err, valid_err, test_err
if __name__ == '__main__':
pass
| mit |
vortex-ape/scikit-learn | examples/cluster/plot_dbscan.py | 39 | 2534 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
# #############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
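# Worked example of the line above (illustrative, not part of the original
# script): labels = [0, 0, 1, -1] gives a set of size 3, minus 1 for the noise
# label -1, so n_clusters_ = 2.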
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
jaeilepp/mne-python | mne/preprocessing/tests/test_infomax.py | 6 | 5969 | # Authors: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
"""
Test the infomax algorithm.
Parts of this code are taken from scikit-learn
"""
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import stats
from scipy import linalg
from mne.preprocessing.infomax_ import infomax
from mne.utils import requires_sklearn, run_tests_if_main, check_version
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
@requires_sklearn
def test_infomax_blowup():
""" Test the infomax algorithm blowup condition
"""
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 100
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
center_and_norm(m)
X = _get_pca().fit_transform(m.T)
k_ = infomax(X, extended=True, l_rate=0.1)
s_ = np.dot(k_, X.T)
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
@requires_sklearn
def test_infomax_simple():
""" Test the infomax algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 500
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
for add_noise in (False, True):
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, n_samples)
center_and_norm(m)
algos = [True, False]
for algo in algos:
X = _get_pca().fit_transform(m.T)
k_ = infomax(X, extended=algo)
s_ = np.dot(k_, X.T)
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
def test_infomax_weights_ini():
""" Test the infomax algorithm when user provides an initial weights matrix.
"""
X = np.random.random((3, 100))
weights = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
w1 = infomax(X, max_iter=0, weights=weights, extended=True)
w2 = infomax(X, max_iter=0, weights=weights, extended=False)
assert_almost_equal(w1, weights)
assert_almost_equal(w2, weights)
@requires_sklearn
def test_non_square_infomax():
""" Test non-square infomax
"""
rng = np.random.RandomState(0)
n_samples = 200
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
n_observed = 6
mixing = rng.randn(n_observed, 2)
for add_noise in (False, True):
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(n_observed, n_samples)
center_and_norm(m)
m = m.T
m = _get_pca(rng).fit_transform(m)
# we need extended since input signals are sub-gaussian
unmixing_ = infomax(m, random_state=rng, extended=True)
s_ = np.dot(unmixing_, m.T)
# Check that the mixing model described in the docstring holds:
mixing_ = linalg.pinv(unmixing_.T)
assert_almost_equal(m, s_.T.dot(mixing_))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
def _get_pca(rng=None):
if not check_version('sklearn', '0.18'):
from sklearn.decomposition import RandomizedPCA
return RandomizedPCA(n_components=2, whiten=True,
random_state=rng)
else:
from sklearn.decomposition import PCA
return PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=rng)
run_tests_if_main()
| bsd-3-clause |
latreach/ResponseTimeSeat | facebookRepliesComments.py | 1 | 2484 | """
This script is designed to fetch all the child comments (replies)
of the comments on Facebook posts.
It requires the access token of the lowest-level business account.
This script will only work for the owner of the account in question.
"""
"""
## Author
# Created by Fernando Dorantes Nieto <(°)
# ( >)"
# /|
"""
## Libraries ----------------------
import pandas as pd
import numpy as np
import facebook
import sys
import datetime
import itertools
import time
from itertools import chain
from datetime import timedelta, date
## Starting the connection ---------------------
token = "yourToken"
api = facebook.GraphAPI(token)
## Reading the ids ------------------------
## The ids were obtained with R
ids = open("idsComments.txt", "r").read()
ids = ids.split()
strings = []
for i in ids:
objeto = "".join([i,"/comments"])
strings.append(objeto)
## Fetching the child comments (replies)
args = {"fields": "parent,message,id, created_time,from"}
RepliesComments =[]
SinResponder = []
contador = 0
for i in strings:
try:
        replies = api.get_object(i, **args)
replies = replies["data"]
contador = contador + 1
print [i, contador]
if replies !=[]:
RepliesComments.append(replies)
else:
SinResponder.append(i)
except facebook.GraphAPIError:
        print [i, "Cannot read this ID"]
Replies = list(chain.from_iterable(RepliesComments))
RepliesExtract =[]
for i in Replies:
temp = dict(IDReply =i["id"])
temp.update(FechaReply = i["created_time"])
    temp.update(IDUsuarioReply = i["from"]["id"])
temp.update(NombreUsuarioReply = i["from"]["name"])
temp.update(MensajeReply=i["message"])
temp.update(FechaCommentParent = i["parent"]["created_time"])
temp.update(NombreUsuarioCommentParent = i["parent"]["from"]["name"])
temp.update(IDUsuarioCommentParent = i["parent"]["from"]["id"])
temp.update(IDParent=i["parent"]["id"])
temp.update(MensajeParent=i["parent"]["message"])
RepliesExtract.append(temp)
RepliesDataFrame = pd.DataFrame.from_dict(RepliesExtract)
RepliesDataFrame.to_csv("CommentsReply.csv", sep=",", header=True, index=False,
encoding="utf-8)
Norespondidos = open("SinResponder.txt", "w")
for i in SinResponder:
print >> Norespondidos, i
Norespondidos.close()
| gpl-3.0 |
shutterfly/logtools | logtools/_plot.py | 2 | 15639 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
logtools._plot
Plotting methods for logfiles
"""
import os
import re
import sys
import locale
import logging
import unicodedata
from itertools import imap
from random import randint
from datetime import datetime
from operator import itemgetter
from optparse import OptionParser
from abc import ABCMeta, abstractmethod
from _config import logtools_config, interpolate_config, AttrDict
__all__ = ['logplot_parse_args', 'logplot', 'logplot_main']
locale.setlocale(locale.LC_ALL, "")
class PlotBackend(object):
__metaclass__ = ABCMeta
@abstractmethod
def plot(self, options, args, fh):
"""Plot using backend implementation"""
class GChartBackend(PlotBackend):
"""Google Chart API plotting backend.
uses the pygooglechart python package"""
def __init__(self):
PlotBackend.__init__(self)
def plot(self, options, args, fh):
"""Plot using google charts api"""
try:
import pygooglechart
except ImportError:
logging.error("pygooglechart Python package must be installed to use the 'gchart' backend")
sys.exit(-1)
try:
chart = {
'pie': self._plot_pie,
'line': self._plot_line,
'timeseries': self._plot_timeseries
}[options.type](options, args, fh)
except KeyError:
raise KeyError("Invalid plot type: '%s'" % options.type)
else:
if options.get('title', None):
chart.set_title(options.title)
if options.get('output', None):
chart.download(options.output)
return chart
def _plot_line(self, options, args, fh):
"""Plot a line chart"""
from pygooglechart import Chart, SimpleLineChart, Axis
delimiter = options.delimiter
field = options.field-1
pts = []
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
k = float(splitted_line.pop(field))
pts.append((k, ' '.join(splitted_line)))
if options.get('limit', None):
# Only wanna use top N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts:
raise ValueError("No data to plot")
max_y = int(max((v for v, label in pts)))
chart = SimpleLineChart(options.width, options.height,y_range=[0, max_y])
# Styling
chart.set_colours(['0000FF'])
chart.fill_linear_stripes(Chart.CHART, 0, 'CCCCCC', 0.2, 'FFFFFF', 0.2)
chart.set_grid(0, 25, 5, 5)
data, labels = zip(*pts)
chart.add_data(data)
# Axis labels
chart.set_axis_labels(Axis.BOTTOM, labels)
left_axis = range(0, max_y + 1, 25)
left_axis[0] = ''
chart.set_axis_labels(Axis.LEFT, left_axis)
return chart
def _plot_pie(self, options, args, fh):
"""Plot a pie chart"""
from pygooglechart import PieChart3D, PieChart2D
delimiter = options.delimiter
field = options.field-1
chart = PieChart2D(options.width, options.height)
pts = []
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
k = int(splitted_line.pop(field))
pts.append((k, ' '.join(splitted_line), locale.format('%d', k, True)))
if options.get('limit', None):
# Only wanna use top N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts:
raise ValueError("No data to plot")
data, labels, legend = zip(*pts)
chart.add_data(data)
chart.set_pie_labels(labels)
if options.get('legend', None) is True:
chart.set_legend(map(str, legend))
return chart
def _plot_timeseries(self, options, args, fh):
"""Plot a timeseries graph"""
from pygooglechart import Chart, SimpleLineChart, Axis
delimiter = options.delimiter
field = options.field-1
datefield = options.datefield-1
pts = []
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
v = float(splitted_line[field])
t = datetime.strptime(splitted_line[datefield], options.dateformat)
pts.append((t, v))
if options.get('limit', None):
# Only wanna use top (earliest) N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts:
raise ValueError("No data to plot")
max_y = int(max((v for t, v in pts)))
chart = SimpleLineChart(options.width, options.height,y_range=[0, max_y])
# Styling
chart.set_colours(['0000FF'])
chart.fill_linear_stripes(Chart.CHART, 0, 'CCCCCC', 0.2, 'FFFFFF', 0.2)
chart.set_grid(0, 25, 5, 5)
ts, vals = zip(*pts)
chart.add_data(vals)
# Axis labels
chart.set_axis_labels(Axis.BOTTOM, ts)
left_axis = range(0, max_y + 1, 25)
left_axis[0] = ''
chart.set_axis_labels(Axis.LEFT, left_axis)
return chart
class MatplotlibBackend(PlotBackend):
"""Use matplotlib (pylab) for rendering plots"""
def __init__(self):
PlotBackend.__init__(self)
def plot(self, options, args, fh):
"""Plot using google charts api"""
try:
import pylab
except ImportError:
logging.error("matplotlib Python package must be installed to use the 'matplotlib' backend")
sys.exit(-1)
try:
chart = {
'hist': self._plot_hist,
'pie': self._plot_pie,
'line': self._plot_line,
'timeseries': self._plot_timeseries
}[options.type](options, args, fh)
except KeyError:
raise KeyError("Invalid plot type: '%s'" % options.type)
else:
if options.get('title', None):
chart.get_axes()[0].set_title(options.title)
if options.get('output', None):
chart.savefig(options.output)
return chart
def _plot_hist(self, options, args, fh):
"""Plot a histogram"""
import pylab
delimiter = options.delimiter
field = options.field-1
pts = []
max_y = -float("inf")
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
k = float(splitted_line.pop(field))
pts.append((k, ' '.join(splitted_line)))
if k > max_y:
max_y = k
if options.get('limit', None):
# Only wanna use top N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts:
raise ValueError("No data to plot")
data, labels = zip(*pts)
normed = False
bins = len(data)/100.
f = pylab.figure()
pylab.hist(data, bins=bins, normed=normed)
return f
def _plot_pie(self, options, args, fh):
"""Plot pie chart"""
from pylab import figure, pie, legend
import matplotlib as mpl
mpl.rc('font', size=8)
delimiter = options.delimiter
field = options.field-1
pts = []
ttl = 0.
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
k = float(splitted_line.pop(field))
ttl += k
pts.append((k, ' '.join(splitted_line), locale.format('%d', k, True)))
if options.get('limit', None):
# Only wanna use top N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts or ttl==0:
raise ValueError("No data to plot")
data, labels, _legend = zip(*pts)
data = list(data)
# Normalize
for idx, pt in enumerate(data):
data[idx] /= ttl
f = figure()
pie(data, labels=labels, autopct='%1.1f%%', shadow=True)
if options.get('legend', None) is True:
legend(_legend, loc=3)
return f
def _plot_line(self, options, args, fh):
"""Line plot using matplotlib"""
import pylab
delimiter = options.delimiter
field = options.field-1
pts = []
max_y = -float("inf")
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
k = float(splitted_line.pop(field))
label = unicodedata.normalize('NFKD', \
unicode(' '.join(splitted_line), 'utf-8')).encode('ascii','ignore')
pts.append((k, label))
if k > max_y:
max_y = k
if options.get('limit', None):
# Only wanna use top N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts:
raise ValueError("No data to plot")
data, labels = zip(*pts)
f = pylab.figure()
pylab.plot(xrange(len(data)), data, "*--b")
if options.get('legend', None):
pylab.xticks(xrange(len(labels)), labels, rotation=17)
return f
def _plot_timeseries(self, options, args, fh):
"""Line plot using matplotlib"""
import pylab
import matplotlib.ticker as ticker
delimiter = options.delimiter
field = options.field-1
datefield = options.datefield-1
pts = []
max_y = -float("inf")
for l in imap(lambda x: x.strip(), fh):
splitted_line = l.split(delimiter)
v = float(splitted_line[field])
t = datetime.strptime(splitted_line[datefield], options.dateformat)
pts.append((t, v))
if v > max_y:
max_y = v
if options.get('limit', None):
# Only use top N samples by key, sort and truncate
pts = sorted(pts, key=itemgetter(0), reverse=True)[:options.limit]
if not pts:
raise ValueError("No data to plot")
N = len(pts)
ts, vals = zip(*pts)
def format_date(x, pos=None):
thisind = int(max(0, min(x, N)))
return ts[thisind].strftime(options.dateformat)
f = pylab.figure()
ax = f.add_subplot(111)
ax.plot(xrange(len(vals)), vals, "*--b")
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
f.autofmt_xdate()
return f
def logplot_parse_args():
parser = OptionParser()
parser.add_option("-b", "--backend", dest="backend",
help="Backend to use for plotting. Currently available backends: 'gchart', 'matplotlib'")
parser.add_option("-T", "--type", dest="type",
help="Chart type. Available types: 'pie', 'histogram', 'line'." \
"Availability might differ due to backend.")
parser.add_option("-f", "--field", dest="field", type=int,
help="Index of field to use as main input for plot")
parser.add_option("-d", "--delimiter", dest="delimiter",
help="Delimiter character for field-separation")
parser.add_option("-o", "--output", dest="output", help="Output filename")
parser.add_option("-W", "--width", dest="width", type=int, help="Plot Width")
parser.add_option("-H", "--height", dest="height", type=int, help="Plot Height")
parser.add_option("-L", "--limit", dest="limit", type=int,
help="Only plot the top N rows, sorted decreasing by key")
parser.add_option("-l", "--legend", dest="legend", action="store_true",
help="Render Plot Legend")
parser.add_option("-t", "--title", dest="title",
help="Plot Title")
parser.add_option("--datefield", dest="datefield", type=int,
help="Index of field to use as date-time source (for timeseries plots)")
parser.add_option("--dateformat", dest="dateformat",
help="Format string for parsing date-time field (for timeseries plots)")
parser.add_option("-P", "--profile", dest="profile", default='logplot',
help="Configuration profile (section in configuration file)")
options, args = parser.parse_args()
# Interpolate from configuration
options.backend = interpolate_config(options.backend, options.profile, 'backend')
options.type = interpolate_config(options.type, options.profile, 'type')
options.field = interpolate_config(options.field, options.profile, 'field', type=int)
options.delimiter = interpolate_config(options.delimiter, options.profile, 'delimiter')
options.output = interpolate_config(options.output, options.profile, 'output', default=False)
options.width = interpolate_config(options.width, options.profile, 'width', type=int)
options.height = interpolate_config(options.height, options.profile, 'height', type=int)
options.limit = interpolate_config(options.limit, options.profile, 'limit', type=int, default=False)
options.legend = interpolate_config(options.legend, options.profile, 'legend', type=bool, default=False)
options.title = interpolate_config(options.title, options.profile, 'title', default=False)
options.datefield = interpolate_config(options.datefield, options.profile, 'datefield', type=int, default=False)
options.dateformat = interpolate_config(options.dateformat, options.profile, 'dateformat', default=False)
return AttrDict(options.__dict__), args
def logplot(options, args, fh):
"""Plot some index defined over the logstream,
using user-specified backend"""
return {
"gchart": GChartBackend(),
"matplotlib": MatplotlibBackend()
}[options.backend].plot(options, args, fh)
def logplot_main():
"""Console entry-point"""
options, args = logplot_parse_args()
logplot(options, args, fh=sys.stdin)
return 0
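# A hypothetical command-line sketch (the flags map to the options defined in
# logplot_parse_args above; the console-script name, input file, field index
# and delimiter are assumptions):
#
#   cat top_keys.tsv | logplot --backend matplotlib --type pie --field 1 \
#       --delimiter $'\t' --limit 10 --legend --output /tmp/top_keys.png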
| apache-2.0 |
xyguo/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
Migelo/mpa_garching | 1/utils.py | 1 | 1742 | import numpy as np
import os
from random import shuffle
import math
import matplotlib as mpl
figsize = np.array([8.268, 8.268*2**.5])
tick_labelsize = mpl.rcParamsOrig['xtick.labelsize']
axes_labelsize = mpl.rcParamsOrig['axes.labelsize']
def prepare_step(y):
'''
Prepare data for a nice step plot.
Args:
y (list): List containing bin values.
Returns:
y (list): List containing corrected bin values.
'''
y = np.insert(y, 0, y[0])
return y
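# Worked example (illustrative, not part of the original file):
#   prepare_step(np.array([3, 5, 2])) -> array([3, 3, 5, 2])
# i.e. the first bin value is duplicated so a step plot starts with a flat edge.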
'''Generate combinations of halos with all tracing types.'''
halos = ('M0408', 'M0501', 'M0616', 'M0664', 'M0858', 'M0959',
'M0977', 'M1192', 'M1196', 'M1646', 'M1859', 'M2283')
#types = ('disc-Uebler', 'disc', 'ball', 'ism')
types = ('ism', )
combinations = []
for halo in halos:
for type in types:
combinations.append((halo, type))
#shuffle(combinations)
def save(path, filename):
'''
    Save the current matplotlib figure to "path/filename", creating the
    directory if it does not exist.
    Args:
        path (str): Directory to save into (created if missing).
        filename (str): Name of the output image file.
Returns:
None
'''
if not os.path.isdir(path):
os.mkdir(path)
plt.savefig('%s/%s' % (path, filename))
def finite(data):
return data[np.isfinite(data)]
def norm_hist(data, maximum):
return np.array(data) * float(maximum) / max(data)
def round_to_n(x, n):
" Round x to n significant figures "
return round(x, -int(math.floor(np.sign(x) * np.log10(abs(x)))) + n)
def str_fmt(x, n=1):
" Format x into nice Latex rounding to n"
if x == 0:
return 0
power = int(np.log10(round_to_n(x, 0)))
f_SF = round_to_n(x, n) * pow(10, -power)
return r"%s\cdot 10^{%s}" % (f_SF, power)
def flatten_list(array):
return [item for sublist in array for item in sublist]
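# Worked example (illustrative): flatten_list([[1, 2], [3]]) -> [1, 2, 3],
# i.e. one level of nesting is removed.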
| mit |
ambikeshwar1991/gnuradio-3.7.4 | gr-digital/examples/berawgn.py | 17 | 4897 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
BER simulation for QPSK signals, compare to theoretical values.
Change the N_BITS value to simulate more bits per Eb/N0 value,
thus allowing to check for lower BER values.
Lower values will work faster, higher values will use a lot of RAM.
Also, this app isn't highly optimized--the flow graph is completely
reinstantiated for every Eb/N0 value.
Of course, expect the maximum value for BER to be one order of
magnitude below what you chose for N_BITS.
"""
import math
import numpy
from gnuradio import gr, digital
from gnuradio import analog
from gnuradio import blocks
import sys
try:
from scipy.special import erfc
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
# Best to choose powers of 10
N_BITS = 1e7
RAND_SEED = 42
def berawgn(EbN0):
""" Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
return 0.5 * erfc(math.sqrt(10**(float(EbN0)/10)))
class BitErrors(gr.hier_block2):
""" Two inputs: true and received bits. We compare them and
add up the number of incorrect bits. Because integrate_ff()
can only add up a certain number of values, the output is
not a scalar, but a sequence of values, the sum of which is
the BER. """
def __init__(self, bits_per_byte):
gr.hier_block2.__init__(self, "BitErrors",
gr.io_signature(2, 2, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_int))
# Bit comparison
comp = blocks.xor_bb()
intdump_decim = 100000
if N_BITS < intdump_decim:
intdump_decim = int(N_BITS)
self.connect(self,
comp,
blocks.unpack_k_bits_bb(bits_per_byte),
blocks.uchar_to_float(),
blocks.integrate_ff(intdump_decim),
blocks.multiply_const_ff(1.0/N_BITS),
self)
self.connect((self, 1), (comp, 1))
class BERAWGNSimu(gr.top_block):
" This contains the simulation flow graph "
def __init__(self, EbN0):
gr.top_block.__init__(self)
self.const = digital.qpsk_constellation()
# Source is N_BITS bits, non-repeated
data = map(int, numpy.random.randint(0, self.const.arity(), N_BITS/self.const.bits_per_symbol()))
src = blocks.vector_source_b(data, False)
mod = digital.chunks_to_symbols_bc((self.const.points()), 1)
add = blocks.add_vcc()
noise = analog.noise_source_c(analog.GR_GAUSSIAN,
self.EbN0_to_noise_voltage(EbN0),
RAND_SEED)
demod = digital.constellation_decoder_cb(self.const.base())
ber = BitErrors(self.const.bits_per_symbol())
self.sink = blocks.vector_sink_f()
self.connect(src, mod, add, demod, ber, self.sink)
self.connect(noise, (add, 1))
self.connect(src, (ber, 1))
def EbN0_to_noise_voltage(self, EbN0):
""" Converts Eb/N0 to a single-sided noise voltage (assuming unit symbol power) """
return 1.0 / math.sqrt(2.0 * self.const.bits_per_symbol() * 10**(float(EbN0)/10))
def simulate_ber(EbN0):
""" All the work's done here: create flow graph, run, read out BER """
print "Eb/N0 = %d dB" % EbN0
fg = BERAWGNSimu(EbN0)
fg.run()
return numpy.sum(fg.sink.data())
if __name__ == "__main__":
EbN0_min = 0
EbN0_max = 15
EbN0_range = range(EbN0_min, EbN0_max+1)
ber_theory = [berawgn(x) for x in EbN0_range]
print "Simulating..."
ber_simu = [simulate_ber(x) for x in EbN0_range]
f = pylab.figure()
s = f.add_subplot(1,1,1)
s.semilogy(EbN0_range, ber_theory, 'g-.', label="Theoretical")
s.semilogy(EbN0_range, ber_simu, 'b-o', label="Simulated")
s.set_title('BER Simulation')
s.set_xlabel('Eb/N0 (dB)')
s.set_ylabel('BER')
s.legend()
s.grid()
pylab.show()
| gpl-3.0 |
sserrot/champion_relationships | venv/share/doc/networkx-2.4/examples/advanced/plot_parallel_betweenness.py | 8 | 2872 | """
====================
Parallel Betweenness
====================
Example of parallel implementation of betweenness centrality using the
multiprocessing module from Python Standard Library.
The function betweenness centrality accepts a bunch of nodes and computes
the contribution of those nodes to the betweenness centrality of the whole
network. Here we divide the network in chunks of nodes and we compute their
contribution to the betweenness centrality of the whole network.
This doesn't work in python2.7.13. It does work in 3.6, 3.5, 3.4, and 3.3.
It may be related to this:
https://stackoverflow.com/questions/1816958/cant-pickle-type-instancemethod-when-using-multiprocessing-pool-map
"""
from multiprocessing import Pool
import time
import itertools
import matplotlib.pyplot as plt
import networkx as nx
def chunks(l, n):
"""Divide a list of nodes `l` in `n` chunks"""
l_c = iter(l)
while 1:
x = tuple(itertools.islice(l_c, n))
if not x:
return
yield x
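# Worked example (illustrative, not part of the original script):
#   list(chunks([0, 1, 2, 3, 4], 2)) -> [(0, 1), (2, 3), (4,)]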
def _betmap(G_normalized_weight_sources_tuple):
"""Pool for multiprocess only accepts functions with one argument.
This function uses a tuple as its only argument. We use a named tuple for
python 3 compatibility, and then unpack it when we send it to
`betweenness_centrality_source`
"""
return nx.betweenness_centrality_source(*G_normalized_weight_sources_tuple)
def betweenness_centrality_parallel(G, processes=None):
"""Parallel betweenness centrality function"""
p = Pool(processes=processes)
node_divisor = len(p._pool) * 4
node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))
num_chunks = len(node_chunks)
bt_sc = p.map(_betmap,
zip([G] * num_chunks,
[True] * num_chunks,
[None] * num_chunks,
node_chunks))
# Reduce the partial solutions
bt_c = bt_sc[0]
for bt in bt_sc[1:]:
for n in bt:
bt_c[n] += bt[n]
return bt_c
if __name__ == "__main__":
G_ba = nx.barabasi_albert_graph(1000, 3)
G_er = nx.gnp_random_graph(1000, 0.01)
G_ws = nx.connected_watts_strogatz_graph(1000, 4, 0.1)
for G in [G_ba, G_er, G_ws]:
print("")
print("Computing betweenness centrality for:")
print(nx.info(G))
print("\tParallel version")
start = time.time()
bt = betweenness_centrality_parallel(G)
print("\t\tTime: %.4F" % (time.time() - start))
print("\t\tBetweenness centrality for node 0: %.5f" % (bt[0]))
print("\tNon-Parallel version")
start = time.time()
bt = nx.betweenness_centrality(G)
print("\t\tTime: %.4F seconds" % (time.time() - start))
print("\t\tBetweenness centrality for node 0: %.5f" % (bt[0]))
print("")
nx.draw(G_ba)
plt.show()
| mit |
rexshihaoren/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
broadinstitute/gatk | src/main/python/org/broadinstitute/hellbender/vqsr_cnn/vqsr_cnn/plots.py | 6 | 6827 | # plots.py
#
# Plotting code for Variant Filtration with Neural Nets
# This includes evaluation plots like Precision and Recall curves,
# various flavors of Receiver Operating Characteristic (ROC curves),
# As well as graphs of the metrics that are watched during neural net training.
#
# December 2016
# Sam Friedman
# sam@broadinstitute.org
# Imports
import os
import math
import matplotlib
import numpy as np
matplotlib.use('Agg') # Need this to write images from the GSA servers. Order matters:
import matplotlib.pyplot as plt # First import matplotlib, then use Agg, then import plt
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_curve, average_precision_score
image_ext = '.png'
color_array = ['red', 'indigo', 'cyan', 'pink', 'purple']
key_colors = {
'Neural Net':'green', 'CNN_SCORE':'green', 'CNN_2D':'green',
'Heng Li Hard Filters':'lightblue',
'GATK Hard Filters':'orange','GATK Signed Distance':'darksalmon',
'VQSR gnomAD':'cornflowerblue', 'VQSR Single Sample':'blue', 'VQSLOD':'cornflowerblue',
'Deep Variant':'magenta', 'QUAL':'magenta', 'DEEP_VARIANT_QUAL':'magenta',
'Random Forest':'darkorange',
'SNP':'cornflowerblue', 'NOT_SNP':'orange', 'INDEL':'green', 'NOT_INDEL':'red',
'VQSLOD none':'cornflowerblue', 'VQSLOD strModel':'orange', 'VQSLOD default':'green',
'REFERENCE':'green', 'HET_SNP':'cornflowerblue', 'HOM_SNP':'blue', 'HET_DELETION':'magenta',
'HOM_DELETION':'violet', 'HET_INSERTION':'orange', 'HOM_INSERTION':'darkorange'
}
precision_label = 'Precision | Positive Predictive Value | TP/(TP+FP)'
recall_label = 'Recall | Sensitivity | True Positive Rate | TP/(TP+FN)'
fallout_label = 'Fallout | 1 - Specificity | False Positive Rate | FP/(FP+TN)'
def get_fpr_tpr_roc(model, test_data, test_truth, labels, batch_size=32):
"""Get false positive and true positive rates from a classification model.
Arguments:
model: The model whose predictions to evaluate.
test_data: Input testing data in the shape the model expects.
test_truth: The true labels of the testing data
labels: dict specifying the class labels.
batch_size: Size of batches for prediction over the test data.
Returns:
dict, dict, dict: false positive rate, true positive rate, and area under ROC curve.
            The dicts all use label indices as keys. The fpr and tpr dicts' values are lists
            (the x and y coordinates that define the ROC curves) and for AUC the value is a float.
"""
y_pred = model.predict(test_data, batch_size=batch_size, verbose=0)
return get_fpr_tpr_roc_pred(y_pred, test_truth, labels)
def get_fpr_tpr_roc_pred(y_pred, test_truth, labels):
"""Get false positive and true positive rates from predictions and true labels.
Arguments:
y_pred: model predictions to evaluate.
test_truth: The true labels of the testing data
labels: dict specifying the class labels.
Returns:
dict, dict, dict: false positive rate, true positive rate, and area under ROC curve.
            The dicts all use label indices as keys. The fpr and tpr dicts' values are lists
            (the x and y coordinates that define the ROC curves) and for AUC the value is a float.
"""
fpr = dict()
tpr = dict()
roc_auc = dict()
for k in labels.keys():
cur_idx = labels[k]
fpr[labels[k]], tpr[labels[k]], _ = roc_curve(test_truth[:,cur_idx], y_pred[:,cur_idx])
roc_auc[labels[k]] = auc(fpr[labels[k]], tpr[labels[k]])
return fpr, tpr, roc_auc
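# Hedged usage sketch (label names and shapes are illustrative assumptions,
# not taken from this module): with labels = {'SNP': 0, 'NOT_SNP': 1} and
# y_pred/test_truth both shaped (n_samples, 2),
#   fpr, tpr, roc_auc = get_fpr_tpr_roc_pred(y_pred, test_truth, labels)
# returns dicts keyed by the label indices 0 and 1, where fpr[0]/tpr[0] hold
# the ROC coordinates of the 'SNP' column and roc_auc[0] is its scalar AUC.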
def plot_roc_per_class(model, test_data, test_truth, labels, title, batch_size=32, prefix='./figures/'):
"""Plot a per class ROC curve.
Arguments:
model: The model whose predictions to evaluate.
test_data: Input testing data in the shape the model expects.
test_truth: The true labels of the testing data
labels: dict specifying the class labels.
title: the title to display on the plot.
batch_size: Size of batches for prediction over the test data.
prefix: path specifying where to save the plot.
"""
fpr, tpr, roc_auc = get_fpr_tpr_roc(model, test_data, test_truth, labels, batch_size)
lw = 3
plt.figure(figsize=(28,22))
matplotlib.rcParams.update({'font.size': 34})
for key in labels.keys():
if key in key_colors:
color = key_colors[key]
else:
color = np.random.choice(color_array)
plt.plot(fpr[labels[key]], tpr[labels[key]], color=color, lw=lw,
label=str(key)+' area under ROC: %0.3f'%roc_auc[labels[key]])
plt.plot([0, 1], [0, 1], 'k:', lw=0.5)
plt.xlim([0.0, 1.0])
plt.ylim([-0.02, 1.03])
plt.xlabel(fallout_label)
plt.ylabel(recall_label)
plt.title('ROC:'+ title + '\n')
matplotlib.rcParams.update({'font.size': 56})
plt.legend(loc="lower right")
figure_path = prefix+"per_class_roc_"+title+image_ext
if not os.path.exists(os.path.dirname(figure_path)):
os.makedirs(os.path.dirname(figure_path))
plt.savefig(figure_path)
print('Saved figure at:', figure_path)
def plot_metric_history(history, title, prefix='./figures/'):
"""Plot metric history throughout training.
Arguments:
history: History object returned by Keras fit function.
title: the title to display on the plot.
prefix: path specifying where to save the plot.
"""
    num_plots = len([k for k in history.history.keys() if 'val' not in k])
row = 0
col = 0
rows = 4
cols = max(2, int(math.ceil(num_plots/float(rows))))
f, axes = plt.subplots(rows, cols, sharex=True, figsize=(36, 24))
for k in sorted(history.history.keys()):
if 'val' not in k:
axes[row, col].plot(history.history[k])
axes[row, col].set_ylabel(str(k))
axes[row, col].set_xlabel('epoch')
if 'val_'+k in history.history:
axes[row, col].plot(history.history['val_'+k])
labels = ['train', 'valid']
else:
labels = [k]
axes[row, col].legend(labels, loc='upper left')
row += 1
if row == rows:
row = 0
col += 1
if row*col >= rows*cols:
break
axes[0, 1].set_title(title)
figure_path = prefix+"metric_history_"+title+image_ext
if not os.path.exists(os.path.dirname(figure_path)):
os.makedirs(os.path.dirname(figure_path))
plt.savefig(figure_path)
def weight_path_to_title(wp):
"""Get a title from a model's weight path
Arguments:
wp: path to model's weights.
Returns:
str: a reformatted string
"""
return wp.split('/')[-1].replace('__', '-').split('.')[0]
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/preprocessing/tests/test_label.py | 156 | 17626 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
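# Illustrative sketch (toy values, not part of the scikit-learn sources): for
# X = np.array([[0., 10.], [1., 20.], [2., 30.]]) and grid_resolution=100,
# each column has only 3 unique values (< grid_resolution), so each axis is
# simply the unique values and the returned grid is their cartesian product
# with shape (3 * 3, 2).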
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
mlindauer/AutoFolio | autofolio/selector/ind_regression.py | 1 | 3629 | import logging
import traceback
import numpy as np
import pandas as pd
from ConfigSpace.hyperparameters import CategoricalHyperparameter, \
UniformFloatHyperparameter, UniformIntegerHyperparameter
from ConfigSpace.conditions import EqualsCondition, InCondition
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace import Configuration
from aslib_scenario.aslib_scenario import ASlibScenario
__author__ = "Marius Lindauer"
__license__ = "BSD"
class IndRegression(object):
@staticmethod
def add_params(cs: ConfigurationSpace):
'''
adds parameters to ConfigurationSpace
'''
selector = cs.get_hyperparameter("selector")
regressor = cs.get_hyperparameter("regressor")
if "IndRegressor" in selector.choices:
cond = InCondition(child=regressor, parent=selector, values=["IndRegressor"])
cs.add_condition(cond)
def __init__(self, regressor_class):
'''
Constructor
'''
self.regressors = []
self.logger = logging.getLogger("IndRegressor")
self.regressor_class = regressor_class
def fit(self, scenario: ASlibScenario, config: Configuration):
'''
        fit one regressor per algorithm on the given ASlib scenario data
Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
ASlib Scenario with all data in pandas
config: ConfigSpace.Configuration
configuration
'''
self.logger.info("Fit PairwiseRegressor with %s" %
(self.regressor_class))
self.algorithms = scenario.algorithms
n_algos = len(scenario.algorithms)
X = scenario.feature_data.values
for i in range(n_algos):
y = scenario.performance_data[scenario.algorithms[i]].values
reg = self.regressor_class()
reg.fit(X, y, config)
self.regressors.append(reg)
def predict(self, scenario: ASlibScenario):
'''
predict schedules for all instances in ASLib scenario data
Arguments
---------
scenario: data.aslib_scenario.ASlibScenario
ASlib Scenario with all data in pandas
Returns
-------
schedule: {inst -> (solver, time)}
schedule of solvers with a running time budget
'''
if scenario.algorithm_cutoff_time:
cutoff = scenario.algorithm_cutoff_time
else:
cutoff = 2**31
n_algos = len(scenario.algorithms)
X = scenario.feature_data.values
scores = np.zeros((X.shape[0], n_algos))
for i in range(n_algos):
reg = self.regressors[i]
Y = reg.predict(X)
scores[:, i] += Y
#self.logger.debug(
# sorted(list(zip(scenario.algorithms, scores)), key=lambda x: x[1], reverse=True))
algo_indx = np.argmin(scores, axis=1)
schedules = dict((str(inst),[s]) for s,inst in zip([(scenario.algorithms[i], cutoff+1) for i in algo_indx], scenario.feature_data.index))
#self.logger.debug(schedules)
return schedules
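    # Hedged note on the returned structure (instance and solver names are
    # illustrative): with cutoff = 5000 a single entry looks like
    #   {'inst_1': [('algo_2', 5001)]}
    # i.e. each instance maps to a one-item schedule holding the algorithm
    # with the smallest predicted performance value (np.argmin above) and a
    # budget of cutoff + 1.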
def get_attributes(self):
'''
returns a list of tuples of (attribute,value)
for all learned attributes
Returns
-------
list of tuples of (attribute,value)
'''
reg_attr = self.regressors[0].get_attributes()
attr = [{self.regressor_class.__name__:reg_attr}]
        return attr
| bsd-2-clause |
johnmwalters/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
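# Illustrative check (toy data, not part of the book code): for
# xs = np.array([1., 2., 3.]) and ys = np.array([2., 4., 6.]),
# cov / varx gives slope = 2, inter = mean(ys) - 2 * mean(xs) = 0, and the
# residuals vanish, so mse = 0.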
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 8 | 21806 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
if batch_size is None:
batch_size = x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes is not None and n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape, batch_size
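# Hedged worked example (shapes are illustrative, not taken from the TF test
# suite): _get_in_out_shape(x_shape=(100, 8), y_shape=(100,), n_classes=3,
# batch_size=32) returns input_shape=[32, 8], output_shape=[32, 3] and
# batch_size=32; with n_classes=None the labels pass through unchanged and
# output_shape would be [32].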
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(
x, y, n_classes, batch_size=None, shuffle=True, epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or iterable.
y: numpy, pandas or Dask array or iterable.
n_classes: number of classes.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
chunk = []
for data in x:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
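# Illustrative sketch (toy values): _batch_data(iter([[1], [2], [3]]), 2)
# yields np.matrix([[1], [2]]) first and then the leftover np.matrix([[3]]);
# the trailing partial chunk is always emitted, even when it is empty.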
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or iterable.
batch_size: Size of batches to split data into.
If `None`, returns one batch of full size.
Returns:
List or iterator of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
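# Illustrative sketch (toy values): for a 2-d x with 10 rows and batch_size=4
# the non-iterable branch above returns three numpy slices of 4, 4 and 2 rows;
# with batch_size=None the whole array comes back as a single-element list.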
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`
Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
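# Illustrative sketch (toy values): _access(np.array([10, 20, 30]), [0, 2])
# returns array([10, 30]); with a pandas Series the same call goes through
# .iloc, so indexing is positional in both cases.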
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(
self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: Feature Nd numpy matrix of shape `[n_samples, n_features, ...]`.
y: Target vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence
of targets. Can be `None` for unsupervised setting.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion.
batch_size: Mini-batch size to accumulate.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features.
y: Input target.
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input.
output_shape: Shape of the output.
input_dtype: DType of input.
output_dtype: DType of output.
"""
self._x = check_array(x, dtype=x.dtype)
# self.n_classes is None means we're passing in raw target indices.
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
if n_classes is not None:
self._y = (None if y is None else check_array(y, dtype=y_dtype))
elif isinstance(y, list):
self._y = np.array(y)
else:
self._y = y
self.n_classes = n_classes
self.max_epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
self._x.shape, None if self._y is None else self._y.shape, n_classes,
batch_size)
# Input dtype matches dtype of x.
self._input_dtype = _check_dtype(self._x.dtype)
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None or self._y is None:
self._output_dtype = np.float32
else:
self._output_dtype = _check_dtype(self._y.dtype)
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if self._shuffle:
self.indices = self.random_state.permutation(self._x.shape[0])
else:
self.indices = np.array(range(self._x.shape[0]))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(dtypes.int32, [1],
name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
input_shape = [None] + self.input_shape[1:]
self._input_placeholder = array_ops.placeholder(
dtypes.as_dtype(self._input_dtype),
input_shape,
name='input')
if self.output_shape is None:
self._output_placeholder = None
else:
output_shape = [None] + self.output_shape[1:]
self._output_placeholder = array_ops.placeholder(
dtypes.as_dtype(self._output_dtype),
output_shape,
name='output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be None.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
end = min(self._x.shape[0], self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# Assign input features from random indices.
inp = (
np.array(_access(self._x, batch_indices)).reshape(
(batch_indices.shape[0], 1))
if len(self._x.shape) == 1 else _access(self._x, batch_indices))
feed_dict[self._input_placeholder.name] = inp
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= self._x.shape[0]:
self.indices = self.random_state.permutation(self._x.shape[0])
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# assign labels from random indices
self.output_shape[0] = batch_indices.shape[0]
out = np.zeros(self.output_shape, dtype=self._output_dtype)
for i in xrange(out.shape[0]):
sample = batch_indices[i]
# self.n_classes is None means we're passing in raw target indices
if self.n_classes is None:
out[i] = _access(self._y, sample)
else:
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, int(_access(self._y, sample))), 1.0)
else:
for idx, value in enumerate(_access(self._y, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(self._y, sample)
feed_dict[self._output_placeholder.name] = out
return feed_dict
return _feed_dict_fn
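# Illustrative usage sketch (not part of the original module; the shapes,
# class count and batch size are assumptions): typical wiring of DataFeeder
# when building a TF v1-style graph by hand.
#
# >>> import numpy as np
# >>> x = np.random.rand(100, 3).astype(np.float32)
# >>> y = np.random.randint(0, 2, size=100)
# >>> feeder = DataFeeder(x, y, n_classes=2, batch_size=32)
# >>> inp, out = feeder.input_builder()      # creates the input/output placeholders
# >>> feed_fn = feeder.get_feed_dict_fn()
# >>> feed_dict = feed_fn()                  # one mini-batch keyed by placeholder names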
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is common to have these iterators rotate infinitely over
the dataset, so that the trainer side controls how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator that, for each element, returns the features.
y: iterator that, for each element, returns one or many class ids /
regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
Attributes:
x: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
[1] + list(x_first_el.shape),
[1] + list(y_first_el.shape) if y is not None else None,
n_classes,
batch_size)
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and n_classes > 0:
self._output_dtype = np.float32
elif y is not None:
if isinstance(y_first_el, list) or isinstance(y_first_el, np.ndarray):
self._output_dtype = _check_dtype(np.dtype(type(y_first_el[0])))
else:
self._output_dtype = _check_dtype(np.dtype(type(y_first_el)))
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
Dict of input and output tensors.
"""
if self.stopped:
raise StopIteration
inp = np.zeros(self.input_shape, dtype=self._input_dtype)
if self._y is not None:
out = np.zeros(self.output_shape, dtype=self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
inp[i, :] = six.next(self._x)
except StopIteration:
self.stopped = True
inp = inp[:i, :]
if self._y is not None:
out = out[:i]
break
if self._y is not None:
y = six.next(self._y)
if self.n_classes is not None and self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, y), 1.0)
else:
for idx, value in enumerate(y):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = y
if self._y is None:
return {self._input_placeholder.name: inp}
return {self._input_placeholder.name: inp,
self._output_placeholder.name: out}
return _feed_dict_fn
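# Illustrative usage sketch (not part of the original module; generator shapes
# and class count are assumptions): feeding from endless Python generators so
# the full dataset never has to sit in memory.
#
# >>> import numpy as np
# >>> def x_iter():
# ...     while True:
# ...         yield np.random.rand(3).astype(np.float32)
# >>> def y_iter():
# ...     while True:
# ...         yield np.int64(np.random.randint(0, 5))
# >>> feeder = StreamingDataFeeder(x_iter(), y_iter(), n_classes=5, batch_size=16)
# >>> inp, out = feeder.input_builder()      # inherited from DataFeeder
# >>> feed_dict = feeder.get_feed_dict_fn()()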
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self, x, y, n_classes, batch_size, shuffle=True,
random_state=None, epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: dask DataFrame or Series containing the features.
y: dask DataFrame or Series containing the target classes /
regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp,
output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
beeleb/repository | newone.py | 1 | 2270 | from __future__ import division # the interval (p, q) is divided into n-1 equal parts, so handle the case where fractions appear
from numpy import linspace
from numpy import fabs
from numpy import array
from mpl_toolkits.axes_grid.axislines import SubplotZero
import matplotlib.pyplot as plt
# the adjustable parameters start below this line
def f(x, a):
    return a*x-x**2 # the family of curves whose envelope is drawn
p = -3 # minimum value of x
q = 3 # maximum value of x
n = 12 # number of tangent lines to draw
a_min = -10 # minimum value of a to display
a_max = 10 # maximum value of a to display
y_min = -6 # minimum value of b to display (the maximum is set automatically so the a and b axes share a 1:1 scale)
# setting only the aspect ratio produced an extremely tall graph, so ylim is fixed explicitly
y_max = y_min+a_max-a_min # derived, not an adjustable parameter
plt.figtext(0.85, 0.35, '$a$') # positions are specified directly, so change these as well when the graph is moved
plt.figtext(0.5, 0.95, '$b$')
# the adjustable parameters end above this line
fig = plt.figure(1)
ax = SubplotZero(fig, 111)
fig.add_subplot(ax)
ax.axhline(linewidth=1.0, color="black")
ax.axvline(linewidth=1.0, color="black")
ax.set_xticks([]) # passing an empty list removes the ticks
ax.set_yticks([])
ax.set(aspect=1)
for direction in ["xzero", "yzero"]:
ax.axis[direction].set_axisline_style("-|>")
ax.axis[direction].set_visible(True)
plt.ylim(ymin=y_min) # placing this any earlier made the x range too narrow
plt.ylim(ymax=y_max)
a = linspace(a_min, a_max, (a_max-a_min) * 10) # the number of points is 10x the length of the range of a, which also handles curved envelopes
# the number of points passed to linspace can be fractional; in that case it appears to truncate to an integer automatically
for i in range(n):
    r = p+(q-p)*i/(n-1) # n tangent lines are drawn, two of them at the endpoints, so the interval is split into n-1 parts
b = f(r, a)
ax.plot(a, b, 'k', linewidth=0.5, alpha=1)
# linewidth: line thickness, alpha: opacity (at most 1), 'k' gives black lines
plt.show()
# plt.savefig('envelopeX.png', bbox_inches='tight', pad_inches=0)
# plt.savefig('test2.pdf', bbox_inches='tight', pad_inches=0)
# for saving as a PNG image and as a PDF, respectively | gpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/indexing.py | 3 | 73228 | # pylint: disable=W0223
import warnings
import numpy as np
from pandas.compat import range, zip
import pandas.compat as compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.common import (
is_integer_dtype,
is_integer, is_float,
is_list_like,
is_sequence,
is_iterator,
is_scalar,
is_sparse,
_is_unorderable_exception,
_ensure_platform_int)
from pandas.core.dtypes.missing import isnull, _infer_fill_value
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe,
is_null_slice, is_full_slice,
_values_from_object)
# the supported indexers
def get_indexers_list():
return [
('ix', _IXIndexer),
('iloc', _iLocIndexer),
('loc', _LocIndexer),
('at', _AtIndexer),
('iat', _iAtIndexer),
]
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
"""
Create an object to more easily perform multi-index slicing
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class _NDFrameIndexer(object):
_valid_types = None
_exception = KeyError
axis = None
def __init__(self, obj, name):
self.obj = obj
self.ndim = obj.ndim
self.name = name
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = self.__class__(self.obj, self.name)
new_self.axis = axis
return new_self
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
values = self.obj.get_value(*key)
if is_scalar(values):
return values
except Exception:
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _get_label(self, label, axis=0):
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as its basically direct indexing
# but will fail when the index is not present
# see GH5667
try:
return self.obj._xs(label, axis=axis)
except:
return self.obj[label]
elif isinstance(label, tuple) and isinstance(label[axis], slice):
raise IndexingError('no slices here, handle elsewhere')
return self.obj._xs(label, axis=axis)
def _get_loc(self, key, axis=0):
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=0, kind=None):
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if 'cannot do' in str(e):
raise
raise IndexingError(key)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
key = com._apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
def _has_valid_type(self, k, axis):
raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
raise ValueError("Location based indexing can only have [%s] "
"types" % self._valid_types)
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed
iterable
"""
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
def _is_nested_tuple_indexer(self, tup):
if any([isinstance(ax, MultiIndex) for ax in self.obj.axes]):
return any([is_nested_tuple(tup, ax) for ax in self.obj.axes])
return False
def _convert_tuple(self, key, is_setter=False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(self._convert_to_indexer(
key, axis=axis, is_setter=is_setter))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _convert_range(self, key, is_setter=False):
""" convert a range argument """
return list(key)
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
def _convert_slice_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
elif isinstance(i, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
# TODO: Panel, DataFrame are not imported, remove?
from pandas import Panel, DataFrame, Series # noqa
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value,
dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360
if (isinstance(ax, MultiIndex) and
not (is_integer(i) or is_null_slice(i))):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes)
if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError("cannot set a frame with no "
"defined index and a scalar")
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer,
value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
try:
new_values = np.concatenate([self.obj._values,
new_values])
except TypeError:
new_values = np.concatenate([self.obj.asobject,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined "
"columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns,
copy=True)
value.name = indexer
# a list-list
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with "
"mismatched columns")
value = Series(value, index=self.obj.columns,
name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if (len(labels) == 1 and
isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and np.iterable(
value) and lplane_indexer != len(value):
if len(obj[idx]) != len(value):
raise ValueError("cannot set using a multi-index "
"selection indexer with a different "
"length than the value")
# make sure we have an ndarray
value = getattr(value, 'values', value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]),
value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0],
plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if (isinstance(pi, tuple) and
all(is_null_slice(idx) or
is_full_slice(idx, len(self.obj))
for idx in pi)):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item],
multiindex_indexer)
else:
v = np.nan
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
all(is_null_slice(idx) for i, idx in enumerate(indexer)
if i != info_axis) and item_labels.is_unique):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer,
value=value)
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
ser : pd.Series
The values to assign to the locations specified by `indexer`
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather that find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = (single_aligner and
(aligners[1] or aligners[2]))
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all([is_sequence(_) for _ in indexer])):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
l = len(indexer[1])
ser = np.tile(ser, l).reshape(l, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n, len(labels)))
# broadcast along other dims
ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
ser = np.tile(ser, l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
# TODO: single_aligner is not used
single_aligner = sum_aligners == 1 # noqa
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix].ravel()
elif cols is None:
cols = ax[ix].ravel()
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and
is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (isinstance(ax, MultiIndex) and
isinstance(df.index, MultiIndex) and
ax.nlevels != df.index.nlevels):
raise TypeError("cannot align on a multi-index with out "
"specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif is_scalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
# TODO: is_frame, is_panel are unused
is_frame = self.obj.ndim == 2 # noqa
is_panel = self.obj.ndim >= 3 # noqa
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
from pandas.core.generic import NDFrame
# ugly hack for GH #836
if not isinstance(self.obj, NDFrame):
return False
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
for indexer, ax in zip(tup, self.obj._data.axes):
if isinstance(ax, MultiIndex):
return False
elif is_bool_indexer(indexer):
return False
elif not ax.is_unique:
return False
return True
def _multi_take(self, tup):
""" create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = dict(
[(a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
for t, a in zip(tup, o._AXIS_ORDERS)])
return o.reindex(**d)
except(KeyError, IndexingError):
raise self._exception
def _convert_for_reindex(self, key, axis=0):
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = labels._convert_arr_indexer(keyarr)
if not labels.is_integer():
keyarr = _ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError("Handle elsewhere")
# raise the error if we are not sorted
ax0 = self.obj._get_axis(0)
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
return None
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we maybe be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, MultiIndex) and self.name != 'iloc':
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', 'A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0 and
len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
# This is an elided recursive call to iloc/loc/etc'
return getattr(section, self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
# we have a nested tuple so have at least 1 multi-index level
# we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a series with a multi-index specified a tuple of
# selectors
return self._getitem_axis(tup, axis=0)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for i, key in enumerate(tup):
if is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, 'ndim'):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
# GH 7516
# if had a 3 dim and are going to a 2d
# axes are reversed on a DataFrame
if i >= 1 and current_ndim == 3 and obj.ndim == 2:
obj = obj.T
axis -= 1
return obj
def _getitem_axis(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif (is_list_like_indexer(key) and
not (isinstance(key, tuple) and
isinstance(labels, MultiIndex))):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
indexer, keyarr = labels._convert_listlike_indexer(
key, kind=self.name)
if indexer is not None and (indexer != -1).all():
return self.obj.take(indexer, axis=axis)
# existing labels are unique and indexer are unique
if labels.is_unique and Index(keyarr).is_unique:
try:
return self.obj.reindex_axis(keyarr, axis=axis)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keyarr)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with "
"non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(
keyarr)
if new_indexer is not None:
result = self.obj.take(indexer[indexer != -1], axis=axis,
convert=False)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]},
copy=True, allow_dups=True)
else:
result = self.obj.take(indexer, axis=axis, convert=False)
return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# Have the index compute an indexer or return None
# if it cannot handle
indexer, objarr = labels._convert_listlike_indexer(
obj, kind=self.name)
if indexer is not None:
return indexer
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer,
missing) = labels.get_indexer_non_unique(objarr)
# 'indexer' has dupes, create 'check' using 'missing'
check = np.zeros_like(objarr)
check[missing] = -1
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % objarr[mask])
return _values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
indexer = self._convert_slice_indexer(slice_obj, axis)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
"""A primarily label-location based indexer, with integer position
fallback.
``.ix[]`` supports mixed integer and label based access. It is
primarily label based, but will fall back to integer positional
access unless the corresponding axis is of integer type.
``.ix`` is the most general indexer and will support any of the
inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
point label schemes. ``.ix`` is exceptionally useful when dealing
with mixed positional and label based hierarchical indexes.
However, when an axis is integer based, ONLY label based access
and not positional access is supported. Thus, in such cases, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
See more at :ref:`Advanced Indexing <advanced>`.
"""
def __init__(self, obj, name):
_ix_deprecation_warning = """
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""" # noqa
warnings.warn(_ix_deprecation_warning,
DeprecationWarning, stacklevel=3)
super(_IXIndexer, self).__init__(obj, name)
def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
return True
else:
self._convert_scalar_indexer(key, axis)
return True
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
if self._is_scalar_access(key):
return self._getitem_scalar(key)
except (KeyError, IndexError):
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _is_scalar_access(self, key):
raise NotImplementedError()
def _getitem_scalar(self, key):
raise NotImplementedError()
def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj.take(inds, axis=axis, convert=False)
except Exception as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
if not need_slice(slice_obj):
return obj
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
"""Purely label-location based indexer for selection by label.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'`` (note that contrary
to usual python slices, **both** the start and the stop are included!).
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.loc`` will raise a ``KeyError`` when the items are not found.
See more at :ref:`Selection by Label <indexing.label>`
"""
_valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean")
_exception = KeyError
def _has_valid_type(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
# slice of labels (where start-end are in the labels)
# slice of integers (only if they are in the labels)
# boolean
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
# mi is just a passthru
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
# TODO: don't check the entire key unless necessary
if (not is_iterator(key) and len(key) and
np.all(ax.get_indexer_for(key) < 0)):
raise KeyError("None of [%s] are in the [%s]" %
(key, self.obj._get_axis_name(axis)))
return True
else:
def error():
if isnull(key):
raise TypeError("cannot use label indexing with a null "
"key")
raise KeyError("the label [%s] is not in the [%s]" %
(key, self.obj._get_axis_name(axis)))
try:
key = self._convert_scalar_indexer(key, axis)
if not ax.contains(key):
error()
except TypeError as e:
# python 3 type errors should be raised
if _is_unorderable_exception(e):
error()
raise
except:
error()
return True
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, MultiIndex):
return False
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key)
return values
def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
if isinstance(labels, MultiIndex):
if isinstance(key, compat.string_types) and \
labels.levels[0].is_all_dates:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if isinstance(component, compat.string_types) and \
labels.levels[i].is_all_dates:
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if its a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError("Indexing a MultiIndex with a "
"DataFrame key is not "
"implemented")
elif hasattr(key, 'ndim') and key.ndim > 1:
raise NotImplementedError("Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented")
if (not isinstance(key, tuple) and len(key) > 1 and
not isinstance(key[0], tuple)):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
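# Illustrative usage sketch (not part of the original module; the frame below
# is a made-up example): the key types described in the _LocIndexer docstring,
# seen through the public ``.loc`` accessor.
#
# >>> import pandas as pd
# >>> df = pd.DataFrame({'x': [1, 2, 3]}, index=['a', 'b', 'c'])
# >>> df.loc['b', 'x']            # single label
# 2
# >>> df.loc['a':'b', 'x']        # label slice, both endpoints included
# a    1
# b    2
# Name: x, dtype: int64
# >>> df.loc[df['x'] > 1, 'x']    # boolean array
# b    2
# c    3
# Name: x, dtype: int64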
class _iLocIndexer(_LocationIndexer):
"""Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`
"""
_valid_types = ("integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
def _has_valid_type(self, key, axis):
if is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean "
"indexing on an integer type "
"is not available")
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
return True
if isinstance(key, slice):
return True
elif is_integer(key):
return self._is_valid_integer(key, axis)
elif is_list_like_indexer(key):
return self._is_valid_list_like(key, axis)
return False
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_integer(k):
return False
ax = self.obj.axes[i]
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key, takeable=True)
return values
def _is_valid_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
l = len(ax)
if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
return True
def _is_valid_list_like(self, key, axis):
# return a boolean if we are a valid list-like (e.g. that we don't
# have out-of-bounds values)
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
if isinstance(key, tuple):
raise IndexingError('Too many indexers')
# coerce the key to not exceed the maximum size of the index
arr = np.array(key)
ax = self.obj._get_axis(axis)
l = len(ax)
if (hasattr(arr, '__len__') and len(arr) and
(arr.max() >= l or arr.min() < -l)):
raise IndexError("positional indexers are out-of-bounds")
return True
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except:
pass
retval = self.obj
axis = 0
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
axis += 1
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim < self.ndim:
axis -= 1
# try to get for the next axis
axis += 1
return retval
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
return self.obj.take(slice_obj, axis=axis, convert=False)
def _get_list_axis(self, key, axis=0):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
try:
return self.obj.take(key, axis=axis, convert=False)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
try:
key = np.asarray(key)
except TypeError: # pragma: no cover
pass
if is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a "
"non-integer key")
# validate the location
self._is_valid_integer(key, axis)
return self._get_loc(key, axis=axis)
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
# may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
elif self._has_valid_type(obj, axis):
return obj
raise ValueError("Can only index by location with a [%s]" %
self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key, is_setter=False):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj.get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
# scalar callable may return tuple
key = com._apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access '
'(setting)!')
key = list(self._convert_key(key, is_setter=True))
key.append(value)
self.obj.set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
"""Fast label-based scalar accessor
Similarly to ``loc``, ``at`` provides **label** based scalar lookups.
You can also set using these indexers.
"""
_takeable = False
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i):
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key
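# Illustrative sketch (not part of the original module): how ``.at`` behaves
# for label-based scalar access. The DataFrame below is a made-up example.
#
#   >>> df = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])
#   >>> df.at['b', 'x']        # single label lookup -> 2
#   >>> df.at['b', 'x'] = 5    # scalar assignment through the same indexer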
class _iAtIndexer(_ScalarAccessIndexer):
"""Fast integer location scalar accessor.
Similarly to ``iloc``, ``iat`` provides **integer** based lookups.
You can also set using these indexers.
"""
_takeable = True
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key, is_setter=False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer "
"indexers")
return key
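# Illustrative sketch (not part of the original module): ``.iat`` is the purely
# positional counterpart of ``.at``; both axes must be addressed by integers.
#
#   >>> df = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])
#   >>> df.iat[1, 0]           # row 1, column 0 -> 2
#   >>> df.iat[1, 'x']         # raises ValueError: integer indexers required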
# 32-bit floating point machine epsilon
_eps = 1.1920929e-07
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
l = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += l
if stop is None or stop > l:
stop = l
elif stop < 0:
stop += l
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
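# Worked example (illustrative, not part of the original module): for a slice
# over a length-12 target, the formula above counts the selected positions.
#
#   >>> length_of_indexer(slice(1, 10, 3), target=list(range(12)))
#   3
#   (positions 1, 4 and 7 -> (10 - 1 + 3 - 1) // 3 == 3)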
def convert_to_index_sliceable(obj, key):
"""if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, compat.string_types):
# we are an actual column
if obj._data.items.contains(key):
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def is_index_slice(obj):
def _is_valid_index(x):
return (is_integer(x) or is_float(x) and
np.allclose(x, int(x), rtol=_eps, atol=0))
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = isnull(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series provided as '
'indexer (index of the boolean Series and of '
'the indexed object do not match')
result = result.astype(bool)._values
elif is_sparse(result):
result = result.to_dense()
result = np.asarray(result, dtype=bool)
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
def convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
def convert_from_missing_indexer_tuple(indexer, axes):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
_idx)
return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])
def maybe_convert_indices(indices, n):
""" if we have negative indicies, translate to postive here
if have indicies that are out-of-bounds, raise an IndexError
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.int_)
mask = indices < 0
if mask.any():
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
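# Worked example (illustrative, not part of the original module): negative
# positions are shifted by ``n`` before the bounds check.
#
#   >>> maybe_convert_indices([-1, 2], n=5)
#   array([4, 2])
#   >>> maybe_convert_indices([5], n=5)    # raises IndexError, out-of-bounds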
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
def is_nested_tuple(tup, labels):
    # check for a compatible nested tuple and MultiIndexes among the axes
if not isinstance(tup, tuple):
return False
# are we nested tuple of: tuple,list,slice
for i, k in enumerate(tup):
if isinstance(k, (tuple, list, slice)):
return isinstance(labels, MultiIndex)
return False
def is_list_like_indexer(key):
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and
type(key) is not tuple)
def is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj):
return (obj.start is not None or obj.stop is not None or
(obj.step is not None and obj.step != 1))
def maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except:
pass
return index
def _non_reducing_slice(slice_):
"""
    Ensure that a slice doesn't reduce to a Series or Scalar.
    Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index,
list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
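# Illustrative sketch (not part of the original module): a bare column label is
# widened so that ``df.loc`` keeps returning a DataFrame rather than a Series.
#
#   >>> _non_reducing_slice('A')
#   (slice(None, None, None), ['A'])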
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
| mit |
466152112/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
lj201112/Mineral-Formula-Calculation | Python/main/Ilmenite.py | 1 | 6782 | # -*- coding: utf-8 -*-
"""
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER
Copyright 2016 LiJie, lj201112@163.com
This file is part of Mineral Formula Calculation.
Mineral Formula Calculation is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
Mineral Formula Calculation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with Mineral Formula Calculation. If not, see <http://www.gnu.org/licenses/>.
Created on 2016/12/15
Author: LiJie
Email: lj201112@163.com
License: GNU Lesser General Public License (LGPL)
"""
def Ilm_Calc(data, data_index):
import numpy as np
import pandas as pd
print "------Magnetite formula Calculation------"
print "The formula calculation of Magnetite only includede 10 elements: SiO2 Al2O3 TiO2 FeO MnO MgO CaO NiO V2O3 Cr2O3. If you have a different list, please rewrite the source code"
raw_input("Press ENTER to continue...")
print
SiO2_M = 28.086 + 15.999 * 2
Al2O3_M = 26.982 * 2 + 15.999 * 3
TiO2_M = 47.867 + 15.999 * 2
FeO_M = 55.845 + 15.999
Fe2O3_M = 55.845 * 2 + 15.999 * 3
MnO_M = 54.938 + 15.999
MgO_M = 24.305 + 15.999
CaO_M = 40.078 + 15.999
Na2O_M = 22.990 * 2 + 15.999
K2O_M = 39.098 * 2 + 15.999
NiO_M = 58.693 + 15.999
V2O3_M = 50.942 * 2 + 15.999 * 3
Cr2O3_M = 51.996 * 2 + 15.999 * 3
# mass percent of the elements
SiO2_wt = data[u'SiO2']
Al2O3_wt = data[u'Al2O3']
TiO2_wt = data[u'TiO2']
TFeO_wt = data[u'FeO']
MnO_wt = data[u'MnO']
MgO_wt = data[u'MgO']
CaO_wt = data[u'CaO']
NiO_wt = data[u'NiO']
V2O3_wt = data[u'V2O3']
    Cr2O3_wt = data[u'Cr2O3']
#mol per 100g molecules
SiO2_n = SiO2_wt / SiO2_M
Al2O3_n = Al2O3_wt / Al2O3_M
TiO2_n = TiO2_wt / TiO2_M
TFeO_n = TFeO_wt / FeO_M
MnO_n = MnO_wt / MnO_M
MgO_n = MgO_wt / MgO_M
CaO_n = CaO_wt / CaO_M
NiO_n = NiO_wt / NiO_M
V2O3_n = V2O3_wt / V2O3_M
Cr2O3_n = Cr2O3_wt / Cr2O3_M
#Cation number
SiO2_ca = SiO2_n
Al2O3_ca = Al2O3_n * 2
TiO2_ca = TiO2_n
TFeO_ca = TFeO_n
MnO_ca = MnO_n
MgO_ca = MgO_n
CaO_ca = CaO_n
NiO_ca = NiO_n
V2O3_ca = V2O3_n * 2
Cr2O3_ca = Cr2O3_n * 2
#Total number of cations
sum_ca = SiO2_ca + Al2O3_ca + TiO2_ca + TFeO_ca + MnO_ca + MgO_ca + CaO_ca + NiO_ca + V2O3_ca + Cr2O3_ca
#---Electrovalency Difference Calculation of Fe2+/Fe3+---
    #General formula of ilmenite is FeTiO3, calculate the cation ratio on the basis of 2 cations
ca_ratio = sum_ca / 2
SiO2_co = SiO2_ca / ca_ratio
    Al2O3_co = Al2O3_ca / ca_ratio
TiO2_co = TiO2_ca / ca_ratio
TFeO_co = TFeO_ca / ca_ratio
MnO_co = MnO_ca / ca_ratio
MgO_co = MgO_ca / ca_ratio
CaO_co = CaO_ca / ca_ratio
NiO_co = NiO_ca / ca_ratio
V2O3_co = V2O3_ca / ca_ratio
Cr2O3_co = Cr2O3_ca / ca_ratio
#Electrovalency of Cation ratio
SiO2_el = SiO2_co * 4
Al2O3_el = Al2O3_co * 3
TiO2_el = TiO2_co * 4
TFeO_el = TFeO_co * 2
MnO_el = MnO_co * 2
MgO_el = MgO_co * 2
CaO_el = CaO_co * 2
NiO_el = NiO_co * 2
V2O3_el = V2O3_co * 3
Cr2O3_el = Cr2O3_co * 3
sum_el = SiO2_el + Al2O3_el + TiO2_el + TFeO_el + MnO_el + MgO_el + CaO_el + NiO_el + V2O3_el + Cr2O3_el
#Ideal Anion Electrovalency = O * 3 = 6
#Calculation of Fe3+ and Fe2+
Fe2O3_co = 6 - sum_el
FeO_co = TFeO_co - Fe2O3_co
Fe2O3_wt = Fe2O3_co * ca_ratio * Fe2O3_M / 2
FeO_wt = FeO_co * ca_ratio * FeO_M
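    # Charge-balance sketch of the step above (illustrative numbers only): the
    # ideal anion charge per 3 oxygens is 6, so any deficit in the summed
    # cation charge (computed with all Fe as Fe2+) is assigned to Fe3+.
    # E.g. if sum_el were 5.90 with TFeO_co = 0.95, then
    # Fe2O3_co = 6 - 5.90 = 0.10 and FeO_co = 0.95 - 0.10 = 0.85.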
#Anion number
SiO2_an = SiO2_n * 2
Al2O3_an = Al2O3_n * 3
TiO2_an = TiO2_n * 2
TFeO_an = TFeO_n
MnO_an = MnO_n
MgO_an = MgO_n
CaO_an = CaO_n
NiO_an = NiO_n
V2O3_an = V2O3_n * 3
Cr2O3_an = Cr2O3_n * 3
sum_an = SiO2_an + Al2O3_an + TiO2_an + TFeO_an + MnO_an + MgO_an + CaO_an + NiO_an + V2O3_an + Cr2O3_an
an_ratio = sum_an / 3
sum_wt = SiO2_wt + TiO2_wt + Al2O3_wt + Fe2O3_wt + FeO_wt + MnO_wt + MgO_wt + CaO_wt + NiO_wt + V2O3_wt + Cr2O3_wt
#Normalizing the wt% and then calculate the anion ratio on the basis of 3O
SiO2_co3 = SiO2_wt / sum_wt * 100 / SiO2_M * 1 / an_ratio
    Al2O3_co3 = Al2O3_wt / sum_wt * 100 / Al2O3_M * 2 / an_ratio
TiO2_co3 = TiO2_wt / sum_wt * 100 / TiO2_M * 1 / an_ratio
Fe2O3_co3 = Fe2O3_wt / sum_wt * 100 / Fe2O3_M * 2 / an_ratio
FeO_co3 = FeO_wt / sum_wt * 100 / FeO_M * 1 / an_ratio
MnO_co3 = MnO_wt / sum_wt * 100 / MnO_M * 1 / an_ratio
MgO_co3 = MgO_wt / sum_wt * 100 / MgO_M * 1 / an_ratio
    CaO_co3 = CaO_wt / sum_wt * 100 / CaO_M * 1 / an_ratio
    NiO_co3 = NiO_wt / sum_wt * 100 / NiO_M * 1 / an_ratio
V2O3_co3 = V2O3_wt / sum_wt * 100 / V2O3_M * 2 / an_ratio
    Cr2O3_co3 = Cr2O3_wt / sum_wt * 100 / Cr2O3_M * 2 / an_ratio
sum_co = SiO2_co + Al2O3_co + TiO2_co + FeO_co + Fe2O3_co + MnO_co + MgO_co + CaO_co + NiO_co + V2O3_co + Cr2O3_co
data_cali = {u'SiO2': float("%.3f" % SiO2_wt),
u'TiO2': float("%.3f" % TiO2_wt),
u'Al2O3': float("%.3f" % Al2O3_wt),
u'Fe2O3': float("%.3f" % Fe2O3_wt),
u'FeO': float("%.3f" % FeO_wt),
u'MnO': float("%.3f" % MnO_wt),
u'MgO': float("%.3f" % MgO_wt),
u'CaO': float("%.3f" % CaO_wt),
u'NiO': float("%.3f" % NiO_wt),
u'V2O3': float("%.3f" % V2O3_wt),
u'Cr2O3': float("%.3f" % Cr2O3_wt),
u'Total': float("%.3f" % sum_wt),
u'Comment': data['Comment'],
u'Si': float("%.4f" % SiO2_co3),
u'Ti': float("%.4f" % TiO2_co3),
u'Al': float("%.4f" % Al2O3_co3),
u'Fe3+': float("%.4f" % Fe2O3_co3),
u'Fe2+': float("%.4f" % FeO_co3),
u'Mn': float("%.4f" % MnO_co3),
u'Mg': float("%.4f" % MgO_co3),
u'Ca': float("%.4f" % CaO_co3),
u'Ni': float("%.4f" % NiO_co3),
u'V': float("%.4f" % V2O3_co3),
u'Cr': float("%.4f" % Cr2O3_co3)
}
return pd.DataFrame(data_cali, index = [data_index], columns = [u'SiO2', u'TiO2', u'Al2O3', u'Fe2O3', u'FeO', u'MnO', u'MgO', u'CaO', u'NiO', u'V2O3', u'Cr2O3', u'Total', u'Comment', u'Si', u'Ti', u'Al', u'Fe3+', u'Fe2+', u'Mn', u'Mg', u'Ca', u'Ni', u'V', u'Cr'])
| gpl-3.0 |
asazo/ANN | tarea3/pregunta1_h.py | 1 | 1885 | from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.cross_validation import KFold
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
url = 'http://www.inf.utfsm.cl/~cvalle/international-airline-passengers.csv'
dataframe = pd.read_csv(url, sep=',', usecols=[1], engine='python', skipfooter=3)
dataframe[:] = dataframe[:].astype('float32')
df_train, df_test = dataframe[0:96].values, dataframe[96:].values
scaler = MinMaxScaler(feature_range=(0, 1)).fit(df_train)
stream_train_scaled = scaler.transform(df_train)
stream_test_scaled = scaler.transform(df_test)
def create_dataset(dataset, lag=1):
dataX = np.zeros((dataset.shape[0]-lag, lag), dtype=np.float32)
for i in range(lag):
dataX[:,i] = dataset[i:-lag+i][:,0]
dataY = dataset[lag:]
return dataX, dataY
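# Worked example of the windowing above (illustrative, not part of the original
# script): with lag=2, each row of dataX holds the two previous values and
# dataY holds the value that follows them.
#
#   dataset = [[10], [20], [30], [40], [50]]
#   dataX   = [[10, 20], [20, 30], [30, 40]]
#   dataY   = [[30], [40], [50]]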
lag = 3
trainX, TrainY = create_dataset(stream_train_scaled, lag)
testX, TestY = create_dataset(stream_test_scaled, lag)
TrainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
TestX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
nb = range(4,13,2)
k = 5
kf_CV = KFold(TrainY[:,0].shape[0], k, shuffle=True)
results = []
for n in nb:
print "Usando",n,"bloques LSTM"
losses = []
for i, (train, test) in enumerate(kf_CV):
print "Analizando fold", i+1, "/", k
model = None
model = Sequential()
model.add(LSTM(output_dim=n, input_dim=lag, activation='tanh', inner_activation='sigmoid'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(TrainX[train], TrainY[train], nb_epoch=100, batch_size=1, verbose=0)
loss = model.evaluate(TrainX[test], TrainY[test])
losses.append(loss)
results.append(losses)
print losses
print "Resultados finales"
print results
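# Possible follow-up (not in the original script): pick the number of LSTM
# blocks with the lowest mean cross-validation loss.
mean_losses = [np.mean(l) for l in results]
best_n = nb[int(np.argmin(mean_losses))]
print "Best number of LSTM blocks:", best_n, "with mean loss", min(mean_losses)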
| mit |
alistairlow/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
ephes/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
yannikbehr/spectroscopy | tests/test_flyspecplugin.py | 1 | 16814 | import datetime
import glob
import inspect
import os
import tempfile
import unittest
import matplotlib
matplotlib.use('Agg')
import matplotlib.image
import numpy as np
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
from spectroscopy.dataset import Dataset
from spectroscopy.plugins.flyspec import FlySpecPlugin
from spectroscopy.plugins.flyspec import FlySpecPluginException
from spectroscopy.util import split_by_scan, _array_multi_sort, vec2bearing
from spectroscopy.visualize import plot
from spectroscopy.datamodel import (InstrumentBuffer,
TargetBuffer,
PreferredFluxBuffer)
class FlySpecPluginTestCase(unittest.TestCase):
"""
Test plugin to read FlySpec data.
"""
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "data")
def compare_images(self, fh, image_fn):
fh.seek(0)
actual_image = matplotlib.image.imread(fh, format='png')
expected_image = matplotlib.image.imread(image_fn, format='png')
# Set the "color" of fully transparent pixels to white. This avoids
# the issue of different "colors" for transparent pixels.
expected_image[expected_image[..., 3] <= 0.0035] = \
[1.0, 1.0, 1.0, 0.0]
actual_image[actual_image[..., 3] <= 0.0035] = \
[1.0, 1.0, 1.0, 0.0]
# This deviates a bit from the matplotlib version and just
# calculates the root mean square error of all pixel values without
# any other fancy considerations. It also uses the alpha channel of
# the images. Scaled by 255.
rms = np.sqrt(
np.sum((255.0 * (expected_image - actual_image)) ** 2) /
float(expected_image.size))
return rms
def test_add(self):
d1 = Dataset(tempfile.mktemp(), 'w')
e = d1.read(os.path.join(self.data_dir, '2016_06_11_0830_TOFP04.txt'),
ftype='FLYSPEC', timeshift=12.0)
r = d1.new(e['RawDataBuffer'])
cb = e['ConcentrationBuffer']
cb.rawdata = [r]
d1.new(cb)
d2 = Dataset(tempfile.mktemp(), 'w')
e = d2.read(os.path.join(self.data_dir, '2016_06_11_0900_TOFP04.txt'),
ftype='FLYSPEC', timeshift=12.0)
r = d2.new(e['RawDataBuffer'])
cb = e['ConcentrationBuffer']
cb.rawdata = [r]
d2.new(cb)
d1 += d2
self.assertEqual(len(d1.elements['Concentration']), 2)
self.assertEqual(len(d1.elements['RawData']), 2)
def test_read(self):
d = Dataset(tempfile.mktemp(), 'w')
e = d.read(os.path.join(self.data_dir,
'2012_02_29_1340_CHILE.txt'),
ftype='FLYSPEC')
r = d.new(e['RawDataBuffer'])
cb = e['ConcentrationBuffer']
cb.rawdata = [r]
c = d.new(cb)
r = d.elements['RawData'][0]
self.assertEqual(sum([x.size for x in r.datetime]), 4600)
self.assertEqual(r.inc_angle[0], 174.750)
c = d.elements['Concentration'][0]
r1 = c.rawdata[0]
self.assertEqual(len(c.value[:]), 4600)
np.testing.assert_array_almost_equal(r1.position[0],
[-67.8047, -23.3565, 3927.], 2)
        # discretize all retrievals onto a grid to show a daily plot
bins = np.arange(0, 180, 1.0)
m = []
for _angle, _so2 in split_by_scan(r1.inc_angle[:], c.value[:]):
_so2_binned = binned_statistic(_angle, _so2, 'mean', bins)
m.append(_so2_binned.statistic)
m = np.array(m)
ids = np.argmax(np.ma.masked_invalid(m), axis=1)
maxima = np.array([166., 167., 167., 167., 168., 167., 168., 167.,
167., 167., 167., 167., 168., 167., 167., 167.,
167., 166., 167., 166., 166., 167., 165., 165.,
165., 164., 165., 163., 163., 164., 163., 165.,
164., 164., 164., 161.])
np.testing.assert_array_almost_equal(maxima, bins[ids], 2)
d1 = Dataset(tempfile.mktemp(), 'w')
e = d1.read(os.path.join(self.data_dir, '2016_06_11_0830_TOFP04.txt'),
ftype='FLYSPEC', timeshift=12.0)
r = d1.new(e['RawDataBuffer'])
cb = e['ConcentrationBuffer']
cb.rawdata = [r]
d1.new(cb)
c = d1.elements['Concentration'][0]
r = c.rawdata[0]
m = []
for _angle, _so2 in split_by_scan(r.inc_angle[:], c.value[:]):
_so2_binned = binned_statistic(_angle, _so2, 'mean', bins)
m.append(_so2_binned.statistic)
m = np.array(m)
ids = np.argmax(np.ma.masked_invalid(m), axis=1)
maxima = np.array([147., 25., 27., 86., 29., 31., 27., 27., 28., 137.,
34., 34.])
np.testing.assert_array_almost_equal(maxima, bins[ids], 2)
def test_read_flux(self):
d = Dataset(tempfile.mktemp(), 'w')
fin = os.path.join(self.data_dir, 'TOFP04', 'TOFP04_2017_06_14.txt')
e = d.read(fin, ftype='flyspecflux', timeshift=13.0)
nlines = None
with open(fin) as fd:
nlines = len(fd.readlines())
self.assertEqual(e['FluxBuffer'].value.shape, (nlines-1,))
fb = e['FluxBuffer']
self.assertEqual(fb.datetime[-1],
np.datetime64('2017-06-14T03:29:38.033000'))
def test_read_refspec(self):
d = Dataset(tempfile.mktemp(), 'w')
x = [521, 637, 692, 818]
y = [305., 315., 319.5, 330.]
f = interp1d(x, y, fill_value='extrapolate')
xnew = list(range(0, 2048))
wavelengths = f(xnew)
with self.assertRaises(FlySpecPluginException):
e = d.read(os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_dark.bin'),
ftype='FLYSPECREF', wavelengths=wavelengths)
e = d.read(os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_dark.bin'),
ftype='FLYSPECREF', type='dark', wavelengths=wavelengths)
self.assertEqual(e['RawDataBuffer'].d_var.shape, (10, 2048))
def test_read_wind(self):
d = Dataset(tempfile.mktemp(), 'w')
fin = os.path.join(self.data_dir, 'TOFP04', 'wind', '2017_06_14.txt')
gf = d.read(fin, ftype='flyspecwind', timeshift=13)
vx = gf.vx[0]
vy = gf.vy[0]
dt = gf.datetime[0]
v = np.sqrt(vx*vx + vy*vy)
self.assertAlmostEqual(v, 10.88, 2)
self.assertAlmostEqual(vec2bearing(vx, vy), 255, 6)
self.assertEqual(dt, np.datetime64('2017-06-13T17:00:00'))
@unittest.skip("Skipping")
def test_plot(self):
d = Dataset(tempfile.mktemp(), 'w')
e = d.read(os.path.join(self.data_dir, '2012_02_29_1340_CHILE.txt'),
ftype='FLYSPEC', timeshift=12.0)
rdt = d.new(e['RawDataTypeBuffer'])
rb = e['RawDataBuffer']
rb.type = rdt
r = d.new(rb)
cb = e['ConcentrationBuffer']
cb.rawdata = [r]
cb.rawdata_indices = np.arange(cb.value.shape[0])
c = d.new(cb)
if False:
with tempfile.TemporaryFile() as fd:
plot(c, savefig=fd, timeshift=12.0)
expected_image = os.path.join(self.data_dir,
'chile_retrievals_overview.png')
rms = self.compare_images(fd, expected_image)
self.assertTrue(rms <= 0.001)
def test_spectra(self):
"""
Test reading binary file containing the raw spectra together with
the text file.
"""
d = Dataset(tempfile.mktemp(), 'w')
fin_txt = os.path.join(self.data_dir, 'TOFP04', '2017_06_14_0930.txt')
fin_bin = os.path.join(self.data_dir, 'TOFP04', '2017_06_14_0930.bin')
fin_high = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_high.bin')
fin_low = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_low.bin')
fin_dark = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_dark.bin')
fin_ref = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_ref.bin')
x = [521, 637, 692, 818]
y = [305., 315., 319.5, 330.]
f = interp1d(x, y, fill_value='extrapolate')
xnew = list(range(0, 2048))
wavelengths = f(xnew)
e = d.read(fin_txt, spectra=fin_bin, wavelengths=wavelengths,
ftype='flyspec', timeshift=12.0)
self.assertEqual(e['RawDataBuffer'].d_var.shape, (1321, 2048))
rdtb = e['RawDataTypeBuffer']
rdt = d.new(rdtb)
rb = e['RawDataBuffer']
rb.type = rdt
r = d.new(rb)
cb = e['ConcentrationBuffer']
rdlist = [r]
for _f in [fin_high, fin_low, fin_dark, fin_ref]:
e = d.read(_f, ftype='flyspecref', wavelengths=wavelengths,
type=_f.replace('fin_', ''))
rdtb = e['RawDataTypeBuffer']
rdt = d.new(rdtb)
rb = e['RawDataBuffer']
rb.type = rdt
r = d.new(rb)
rdlist.append(r)
cb.rawdata = rdlist
c = d.new(cb)
for _r in c.rawdata[:]:
if _r.type.name[0] == 'measurement':
break
if False:
with tempfile.TemporaryFile() as fd:
plot(_r, savefig=fd)
expected_image = os.path.join(self.data_dir,
'raw_data_plot.png')
rms = self.compare_images(fd, expected_image)
self.assertTrue(rms <= 0.001)
def test_readabunch(self):
"""
Read in a whole day's worth of data including the reference spectra,
the flux results, and the wind data.
"""
def keyfunc(fn):
date = os.path.basename(fn).split('.')[0]
year, month, day, hourmin = date.split('_')
return datetime.datetime(int(year), int(month), int(day),
int(hourmin[0:2]), int(hourmin[2:]))
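        # e.g. a file named '2017_06_14_0930.txt' sorts as
        # datetime.datetime(2017, 6, 14, 9, 30) (illustrative comment only)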
# Reference spectra
fin_high = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_high.bin')
fin_low = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_low.bin')
fin_dark = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_dark.bin')
fin_ref = os.path.join(self.data_dir, 'TOFP04',
'Cal_20170602_0956_ref.bin')
bearing = 285.
x = [521, 637, 692, 818]
y = [305., 315., 319.5, 330.]
f = interp1d(x, y, fill_value='extrapolate')
xnew = list(range(0, 2048))
wavelengths = f(xnew)
d = Dataset(tempfile.mktemp(), 'w')
ib = InstrumentBuffer(location='Te Maari crater',
type='FlySpec',
name='TOFP04')
inst = d.new(ib)
tb = TargetBuffer(name='Upper Te Maari crater',
position=[175.671854359, -39.107850505, 1505.])
t = d.new(tb)
rdlist = []
for _k, _f in zip(['high', 'low', 'dark', 'ref'],
[fin_high, fin_low, fin_dark, fin_ref]):
e = d.read(_f, ftype='flyspecref', wavelengths=wavelengths,
type=_k)
rdtb = e['RawDataTypeBuffer']
rdt = d.new(rdtb)
rb = e['RawDataBuffer']
rb.type = rdt
rb.instrument = inst
r = d.new(rb)
rdlist.append(r)
files = glob.glob(os.path.join(self.data_dir, 'TOFP04', '2017*.txt'))
files = sorted(files, key=keyfunc)
r = None
c = None
nlines = 0
last_index = 0
for _f in files:
try:
fin_bin = _f.replace('.txt', '.bin')
with open(_f) as fd:
nlines += len(fd.readlines())
e = d.read(_f, ftype='FLYSPEC', spectra=fin_bin,
wavelengths=wavelengths, bearing=bearing,
timeshift=12)
if r is None and c is None:
rdt = d.new(e['RawDataTypeBuffer'])
rb = e['RawDataBuffer']
rb.type = rdt
rb.instrument = inst
rb.target = t
r = d.new(rb)
cb = e['ConcentrationBuffer']
rdlist.append(r)
cb.rawdata = rdlist
cb.rawdata_indices = np.arange(cb.value.shape[0])
last_index = cb.value.shape[0] - 1
c = d.new(cb)
else:
r.append(e['RawDataBuffer'])
cb = e['ConcentrationBuffer']
cb.rawdata_indices = (last_index + 1 +
np.arange(cb.value.shape[0]))
last_index = last_index + cb.value.shape[0]
c.append(cb)
except Exception as ex:
print((ex, _f, fin_bin))
continue
# Check all data has been read
self.assertEqual(c.rawdata[4].d_var.shape, (nlines, 2048))
self.assertEqual(c.rawdata[4].inc_angle.shape, (nlines,))
self.assertEqual(c.value[0], 119.93)
self.assertEqual(c.value[-1], 23.30)
self.assertEqual(c.rawdata[4].datetime[-1],
np.datetime64('2017-06-14T04:30:00.535'))
self.assertEqual(c.rawdata[4].datetime[0],
np.datetime64('2017-06-13T20:30:49.512'))
if False:
with tempfile.TemporaryFile() as fd:
plot(c, savefig=fd)
expected_image = os.path.join(self.data_dir, 'TOFP04',
'concentration_plot.png')
rms = self.compare_images(fd, expected_image)
self.assertTrue(rms <= 0.001)
with tempfile.TemporaryFile() as fd:
plot(c.rawdata[0], savefig=fd)
expected_image = os.path.join(self.data_dir, 'TOFP04',
'ref_spectrum.png')
rms = self.compare_images(fd, expected_image)
self.assertTrue(rms <= 0.001)
fe = d.read(os.path.join(self.data_dir, 'TOFP04',
'TOFP04_2017_06_14.txt'),
ftype='flyspecflux', timeshift=12)
gf = d.read(os.path.join(self.data_dir, 'TOFP04', 'wind',
'2017_06_14.txt'),
ftype='flyspecwind', timeshift=12)
fb = fe['FluxBuffer']
draw = r.datetime[:].astype('datetime64[us]')
inds = []
for i in range(fb.value.shape[0]):
d0 = fb.datetime[i].astype('datetime64[us]')
idx0 = np.argmin(abs(draw-d0))
if i < fb.value.shape[0]-1:
d1 = fb.datetime[i+1].astype('datetime64[us]')
idx1 = np.argmin(abs(draw-d1))
# There is a small bug in Nial's program that gets
# the start of the final scan in a file wrong
if r.inc_angle[idx1-1] < r.inc_angle[idx1]:
idx1 -= 1
fb.datetime[i+1] = r.datetime[idx1]
else:
idx1 = r.datetime.shape[0]
inds.append([idx0, idx1-1])
fb.concentration_indices = inds
fb.concentration = c
mb = fe['MethodBuffer']
m = d.new(mb)
fb.method = m
fb.gasflow = gf
f = d.new(fb)
nos = 18
i0, i1 = f.concentration_indices[nos]
cn = f.concentration
rn = cn.rawdata[4]
self.assertAlmostEqual(f.value[nos], 0.62, 2)
self.assertEqual(rn.inc_angle[i0], 25.)
self.assertEqual(rn.inc_angle[i1], 150.)
self.assertEqual(f.datetime[nos],
np.datetime64('2017-06-13T21:20:17.196000'))
pfb = PreferredFluxBuffer(fluxes=[f],
flux_indices=[[nos]],
value=[f.value[nos]],
datetime=[f.datetime[nos]])
d.new(pfb)
def suite():
return unittest.makeSuite(FlySpecPluginTestCase, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| gpl-3.0 |
bgshin/doc-classify-multi-gpu | src/cnntw/train_multi.py | 1 | 13455 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import cnnt_input
import cnn_model
from sklearn.metrics import precision_score, recall_score, f1_score
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './train/',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 4,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_float('dropout_keep_prob', 0.8, """dropout_keep_prob""")
def tower_loss(namescope, target, batch_size=4):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
namescope: unique prefix string identifying the tweets tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for tweets
txts, labels = cnnt_input.get_inputs(target, batch_size=batch_size)
# if target == 'trn':
# txts, labels = cnnt_input.get_inputs('trn', batch_size=batch_size)
#
# else: # 'dev'
# txts, labels = cnnt_input.get_inputs('tst', batch_size=batch_size)
# Build inference Graph.
if target=='trn':
logits = cnn_model.inference(txts, dropout_keep_prob=FLAGS.dropout_keep_prob)
else:
logits = cnn_model.inference(txts)
y_true = tf.argmax(labels, 1, name="golds")
y_pred = tf.argmax(logits, 1, name="predictions")
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_, accuracy = cnn_model.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', namescope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cnn_model.TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss, accuracy, logits, y_true, y_pred
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
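# Minimal sketch of what the averaging does (illustrative, not part of the
# original script): two towers reporting gradients 1.0 and 3.0 for the same
# shared variable yield a single (2.0, var) pair, with ``var`` taken from the
# first tower.
#
#   tower_grads = [[(g_tower0, v)], [(g_tower1, v)]]
#   average_gradients(tower_grads) -> [(mean(g_tower0, g_tower1), v)]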
def train():
maxdev = 0
maxtst = 0
maxindex = 0
"""Train cnnt for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
# num_batches_per_epoch = (cnnt_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
# FLAGS.batch_size)
# decay_steps = int(num_batches_per_epoch * cnnt_input.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
# lr = tf.train.exponential_decay(cnnt_input.INITIAL_LEARNING_RATE,
# global_step,
# decay_steps,
# cnnt_input.LEARNING_RATE_DECAY_FACTOR,
# staircase=True)
# Create an optimizer that performs gradient descent.
# opt = tf.train.GradientDescentOptimizer(lr)
opt = tf.train.AdamOptimizer(1e-3)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
with tf.device('/gpu:%d' % 0):
with tf.name_scope('%s_%d_dev' % (cnn_model.TOWER_NAME, 0)) as namescope:
loss_dev, accuracy_dev, logits_dev, y_true_dev, y_pred_dev = \
tower_loss(namescope, 'dev', batch_size=1588)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
with tf.device('/gpu:%d' % 1):
with tf.name_scope('%s_%d_tst' % (cnn_model.TOWER_NAME, 0)) as namescope:
loss_tst, accuracy_tst, logits_tst, y_true_tst, y_pred_tst = \
tower_loss(namescope, 'tst', batch_size=20632)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cnn_model.TOWER_NAME, i)) as namescope:
                    # Calculate the loss for one tower of the CNN model. This function
                    # constructs the entire CNN model but shares the variables across
                    # all towers.
loss, accuracy, _, _, _ = tower_loss(namescope, 'trn', batch_size=FLAGS.batch_size)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, namescope)
                    # Calculate the gradients for the batch of data on this tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# # Add a summary to track the learning rate.
# summaries.append(tf.summary.scalar('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cnnt_input.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
# gpu_options = tf.GPUOptions(visible_device_list=str('0,1,2,3'), allow_growth=True) # d
# gpu_options = tf.GPUOptions(visible_device_list=str('1,2,3'), allow_growth=True) # d
# gpu_options = tf.GPUOptions(visible_device_list=str('0,2,3'), allow_growth=True) # d
gpu_options = tf.GPUOptions(visible_device_list=str('2,3'), allow_growth=True) # o
# gpu_options = tf.GPUOptions(visible_device_list=str('1,2'), allow_growth=True) # d
sess = tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# _, loss_value = sess.run([train_op, loss])
_, loss_value, accuracy_val = sess.run([train_op, loss, accuracy])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.4f, acc = %.4f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value, accuracy_val,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
loss_dev_value, accuracy_dev_value, logits_dev_value, y_true_dev_value, y_pred_dev_value = \
sess.run([loss_dev, accuracy_dev, logits_dev, y_true_dev, y_pred_dev])
f1_neg_dev = f1_score(y_true_dev_value==0, y_pred_dev_value==0)
f1_pos_dev = f1_score(y_true_dev_value == 2, y_pred_dev_value == 2)
f1_avg_dev = (f1_neg_dev+f1_pos_dev)/2
format_str = ('[Eval] %s: step %d, loss = %.4f, acc = %.4f, f1neg = %.4f, f1pos = %.4f, f1 = %.4f')
print(format_str % (datetime.now(), step, loss_dev_value, accuracy_dev_value,
f1_neg_dev, f1_pos_dev, f1_avg_dev))
loss_tst_value, accuracy_tst_value, logits_tst_value, y_true_tst_value, y_pred_tst_value = \
sess.run([loss_tst, accuracy_tst, logits_tst, y_true_tst, y_pred_tst])
f1_neg_tst = f1_score(y_true_tst_value == 0, y_pred_tst_value == 0)
f1_pos_tst = f1_score(y_true_tst_value == 2, y_pred_tst_value == 2)
f1_avg_tst = (f1_neg_tst + f1_pos_tst) / 2
format_str = ('[Test] %s: step %d, loss = %.4f, acc = %.4f, f1neg = %.4f, f1pos = %.4f, f1 = %.4f')
print(format_str % (datetime.now(), step, loss_tst_value, accuracy_tst_value,
f1_neg_tst, f1_pos_tst, f1_avg_tst))
if maxdev<f1_avg_dev:
maxdev = f1_avg_dev
maxtst = f1_avg_tst
maxindex = step
format_str = ('[Status] %s: step %d, maxindex = %d, maxdev = %.4f, maxtst = %.4f')
print(format_str % (datetime.now(), step, maxindex, maxdev, maxtst))
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/axis.py | 69 | 54453 | """
Classes for the ticks and x and y axis
"""
from __future__ import division
from matplotlib import rcParams
import matplotlib.artist as artist
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
a boolean which determines whether to draw the tickline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size = None, # points
gridOn = None, # defaults to axes.grid
tick1On = True,
tick2On = True,
label1On = True,
label2On = False,
major = True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in relative, axes coords
"""
artist.Artist.__init__(self)
if gridOn is None: gridOn = rcParams['axes.grid']
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
if size is None:
if major:
size = rcParams['%s.major.size'%name]
pad = rcParams['%s.major.pad'%name]
else:
size = rcParams['%s.minor.size'%name]
pad = rcParams['%s.minor.pad'%name]
self._tickdir = rcParams['%s.direction'%name]
if self._tickdir == 'in':
self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
self._pad = pad
else:
self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = pad + size
self._loc = loc
self._size = size
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def get_children(self):
children = [self.tick1line, self.tick2line, self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
#self.tick1line.set_clip_path(clippath, transform)
#self.tick2line.set_clip_path(clippath, transform)
self.gridline.set_clip_path(clippath, transform)
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._pad / 72.0
def contains(self, mouseevent):
"""
        Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if callable(self._contains): return self._contains(self,mouseevent)
return False,{}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._pad = val
def get_pad(self):
'Get the value of the tick label pad in points'
return self._pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__name__)
midPoint = mtransforms.interval_contains(self.get_view_interval(), self.get_loc())
if midPoint:
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
def _set_artist_props(self, a):
a.set_figure(self.figure)
#if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text1_transform(self._pad)
size = rcParams['xtick.labelsize']
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=size),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[0],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D( xdata=(0,), ydata=(1,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[1],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
    # how far from the y axis line the right of the ticklabel is
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
#t.set_transform( self.axes.transData )
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (0,), (0,), color='k',
marker = self._ytickmarkers[0],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (1,), (0,), color='k',
marker = self._ytickmarkers[1],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( xdata=(0,1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y( y )
if self.label2On:
self.label2.set_y( y )
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
class Ticker:
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`transData` - transform data coords to display coords
* :attr:`transAxis` - transform axis coords to display coords
"""
LABELPAD = 5
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)"%tuple(self.axes.transAxes.transform_point((0,0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
#class dummy:
# locator = None
# formatter = None
#self.major = dummy()
#self.minor = dummy()
self._autolabelpos = True
self.label = self._get_label()
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
self.cla()
self.set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
coordinate of the x label.
You can also specify the coordinate system of the label with
the transform. If None, the default coordinate system will be
the axes coordinate system, where (0,0) is (left, bottom), (0.5, 0.5)
is the middle, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
# whether the grids are on
self._gridOnMajor = rcParams['axes.grid']
self._gridOnMinor = False
self.label.set_text('')
self._set_artist_props(self.label)
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
self.converter = None
self.units = None
self.set_units(None)
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'Set the axis data limits'
raise NotImplementedError('Derived must override')
def _set_artist_props(self, a):
if a is None: return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
ticklabelBoxes = []
ticklabelBoxes2 = []
if not self.get_visible(): return
renderer.open_group(__name__)
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
# scale up the axis label box to also find the neighbors, not
# just the tick labels that actually overlap. Note we need a
# *copy* of the axis label box because we don't want to scale
# the actual bbox.
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in self.majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1+labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1+labels2)
def get_ticklabels(self, minor=False):
'Return a list of Text instances for ticklabels'
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
'return the default tick instance'
raise NotImplementedError('derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None: return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
Set the axis grid on or off; *b* is a boolean. Use *which* =
'major' | 'minor' to set the grid for major or minor ticks.
If *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True.
*kwargs* are used to set the line properties of the grids, eg,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs): b = True
if which.lower().find('minor')>=0:
if b is None: self._gridOnMinor = not self._gridOnMinor
else: self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMinor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
else:
if b is None: self._gridOnMajor = not self._gridOnMajor
else: self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMajor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
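# A brief illustrative sketch (kept as a comment so this module is unchanged
# at import time) of the grid API documented above, driven through the public
# Axes interface; the figure/axes names below are assumptions for the example:
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     ax.plot(range(10))
#     ax.xaxis.grid(True, which='major', color='0.8', linestyle='--')
#     ax.yaxis.grid(True)
#     fig.savefig('grid_demo.png')   # illustrative output path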
def update_units(self, data):
"""
introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True* if *data* is
registered for unit conversion
"""
converter = munits.registry.get_converter(data)
if converter is None: return False
self.converter = converter
default = self.converter.default_units(data)
#print 'update units: default="%s", units=%s"'%(default, self.units)
if default is not None and self.units is None:
self.set_units(default)
self._update_axisinfo()
return True
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units)
if info is None:
return
if info.majloc is not None and self.major.locator!=info.majloc:
self.set_major_locator(info.majloc)
if info.minloc is not None and self.minor.locator!=info.minloc:
self.set_minor_locator(info.minloc)
if info.majfmt is not None and self.major.formatter!=info.majfmt:
self.set_major_formatter(info.majfmt)
if info.minfmt is not None and self.minor.formatter!=info.minfmt:
self.set_minor_formatter(info.minfmt)
if info.label is not None:
label = self.get_label()
label.set_text(info.label)
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
#print 'convert_units returning identity: units=%s, converter=%s'%(self.units, self.converter)
return x
ret = self.converter.convert(x, self.units)
#print 'convert_units converting: axis=%s, units=%s, converter=%s, in=%s, out=%s'%(self, self.units, self.converter, x, ret)
return ret
def set_units(self, u):
"""
set the units for axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u!=self.units:
self.units = u
#print 'setting units', self.converter, u, munits.registry.get_converter(u)
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
def get_units(self):
'return the units for axis'
return self.units
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.major.formatter = formatter
formatter.set_axis(self)
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.minor.formatter = formatter
formatter.set_axis(self)
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.major.locator = locator
locator.set_axis(self)
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.minor.locator = locator
locator.set_axis(self)
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
ACCEPTS: sequence of strings
"""
#ticklabels = [str(l) for l in ticklabels]
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter( mticker.FixedFormatter(ticklabels) )
ticks = self.get_major_ticks()
ret = []
for i, tick in enumerate(ticks):
if i<len(ticklabels):
tick.label1.set_text(ticklabels[i])
ret.append(tick.label1)
tick.label1.update(kwargs)
return ret
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
### XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator( mticker.FixedLocator(ticks) )
return self.get_major_ticks(len(ticks))
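# A short usage sketch (as a comment, not executed) of the set_ticks and
# set_ticklabels methods defined above, assuming `ax` is an existing Axes
# instance:
#
#     ax.xaxis.set_ticks([0.0, 0.5, 1.0])
#     ax.xaxis.set_ticklabels(['low', 'mid', 'high'])
#     ax.yaxis.set_ticks([0, 10, 20], minor=False)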
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset text position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self,mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = xaxes>=0 and xaxes<=1 and (
(y<b and y>b-self.pickradius) or
(y>t and y<t+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return XTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center',
)
label.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(label)
self.label_position='bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color = rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right',
)
offsetText.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(offsetText)
self.offset_text_position='bottom'
return offsetText
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
assert position == 'top' or position == 'bottom'
if position == 'top':
self.label.set_verticalalignment('bottom')
else:
self.label.set_verticalalignment('top')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'bottom':
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / 72.0))
else:
if not len(bboxes2):
top = self.axes.bbox.ymax
else:
bbox = mtransforms.Bbox.union(bboxes2)
top = bbox.y1
self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / 72.0))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none).
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at the bottom.
'none' can be used if you don't want any ticks.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
assert position in ('top', 'bottom', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'top':
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'bottom':
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
for t in ticks:
t.tick1On = True
t.tick2On = True
for t in ticks:
t.update_position(t._loc)
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorTop and minorTop: return 'top'
MajorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
MinorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if MajorBottom and MinorBottom: return 'bottom'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'Set the axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = yaxes>=0 and yaxes<=1 and (
(x<l and x>l-self.pickradius) or
(x>r and x<r+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return YTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='center',
horizontalalignment='right',
rotation='vertical',
)
label.set_transform( mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes) )
self._set_artist_props(label)
self.label_position='left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties = font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color = rcParams['ytick.color'],
verticalalignment = 'bottom',
horizontalalignment = 'left',
)
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()) )
self._set_artist_props(offsetText)
self.offset_text_position='left'
return offsetText
def get_label_position(self):
"""
Return the label position (left or right)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
assert position == 'left' or position == 'right'
if position == 'right':
self.label.set_horizontalalignment('left')
else:
self.label.set_horizontalalignment('right')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'left':
if not len(bboxes):
left = self.axes.bbox.xmin
else:
bbox = mtransforms.Bbox.union(bboxes)
left = bbox.x0
self.label.set_position( (left-self.LABELPAD*self.figure.dpi/72.0, y))
else:
if not len(bboxes2):
right = self.axes.bbox.xmax
else:
bbox = mtransforms.Bbox.union(bboxes2)
right = bbox.x1
self.label.set_position( (right+self.LABELPAD*self.figure.dpi/72.0, y))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def set_offset_position(self, position):
assert position == 'left' or position == 'right'
x,y = self.offsetText.get_position()
if position == 'left': x = 0
else: x = 1
self.offsetText.set_ha(position)
self.offsetText.set_position((x,y))
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position (left, right, both, default or none).
'both' sets the ticks to appear on both positions, but
does not change the tick labels.
'default' resets the tick positions to the default:
ticks on both positions, labels on the left.
'none' can be used if you don't want any ticks.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
assert position in ('left', 'right', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'right':
self.set_offset_position('right')
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'left':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, both or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorRight and minorRight: return 'right'
majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if majorLeft and minorLeft: return 'left'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
'Set the axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
| gpl-3.0 |
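The axis module above is normally exercised through the public Axes interface rather than by instantiating XAxis/YAxis directly. The following minimal sketch shows how the tick, grid and label machinery documented above is typically reached from user code; the 'Agg' backend choice and the output filename are illustrative assumptions, not part of the module.

import matplotlib
matplotlib.use('Agg')  # assumption: non-interactive backend for a headless run
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(5), [x ** 2 for x in range(5)])

# XAxis/YAxis methods defined above, reached through the Axes instance
ax.xaxis.set_ticks([0, 2, 4])
ax.xaxis.set_ticklabels(['zero', 'two', 'four'])
ax.xaxis.set_ticks_position('bottom')  # ticks and labels only at the bottom
ax.yaxis.tick_right()                  # ticks only on the right spine
ax.xaxis.grid(True, color='0.8', linestyle='--')

fig.savefig('axis_demo.png')           # illustrative output path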
scenarios/tensorflow | tensorflow/examples/learn/iris_val_based_early_stopping.py | 62 | 2827 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
learn = tf.contrib.learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
# classifier with early stopping on validation data, save frequently for
# monitor to pick up new checkpoints.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
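The ValidationMonitor above stops training once the validation metric has failed to improve for early_stopping_rounds steps. A framework-agnostic sketch of the same patience rule, for cases where such a monitor is not available, might look like the following; train_one_epoch and validate are placeholder callables standing in for whatever training loop is in use.

def fit_with_early_stopping(train_one_epoch, validate, max_epochs=100, patience=10):
  """Train until the validation loss stops improving for `patience` epochs."""
  best_loss = float('inf')
  epochs_without_improvement = 0
  for _ in range(max_epochs):
    train_one_epoch()
    val_loss = validate()
    if val_loss < best_loss:
      best_loss = val_loss
      epochs_without_improvement = 0  # improvement: reset the patience counter
    else:
      epochs_without_improvement += 1
      if epochs_without_improvement >= patience:
        break  # no improvement for `patience` epochs: stop early
  return best_loss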
akhilaananthram/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_mixed.py | 70 | 3776 | from matplotlib._image import frombuffer
from matplotlib.backends.backend_agg import RendererAgg
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, width, height, dpi, vector_renderer, raster_renderer_class=None):
"""
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
assert not vector_renderer.option_image_nocomposite()
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
width, height = self._width * self.dpi, self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = frombuffer(buffer, w, h, True)
image.is_grayscale = False
image.flipud_out()
self._renderer.draw_image(l, height - b - h, image, None)
self._raster_renderer = None
self._rasterizing = False
| agpl-3.0 |
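The MixedModeRenderer above is not normally constructed by hand; a vector backend creates one when an artist has been marked for rasterization. A minimal sketch of how that is requested from user code, assuming the rasterized artist property is supported by this matplotlib version (file names are illustrative):

import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111)
data = np.random.rand(50, 50)
# rasterized=True asks a vector backend (e.g. PDF) to route this artist
# through start_rasterizing()/stop_rasterizing() and embed it as an image
mesh = ax.pcolormesh(data, rasterized=True)
fig.savefig('mixed_mode.pdf')  # vector file with a single rasterized artist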
heli522/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
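The script above imputes with the mean strategy; switching strategies is a one-line change. A brief sketch using the same (since-deprecated) Imputer API as the example above; current scikit-learn exposes the equivalent functionality as sklearn.impute.SimpleImputer:

import numpy as np
from sklearn.preprocessing import Imputer

X = np.array([[1.0, 2.0], [0.0, 3.0], [7.0, 0.0]])  # here 0 marks a missing entry

for strategy in ('mean', 'median', 'most_frequent'):
    imputer = Imputer(missing_values=0, strategy=strategy, axis=0)
    print(strategy)
    print(imputer.fit_transform(X))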
yashchandak/GNN | Sample_Run/Dynamic_Bi/__main__.py | 1 | 24642 | from __future__ import print_function
import os.path
import time, math, sys
from copy import deepcopy
import scipy.sparse as sps
from scipy.io import loadmat, savemat
import numpy as np
from sklearn.preprocessing import normalize
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import blogDWdata as input_data
import network as architecture
import Config as conf
import Eval_Calculate_Performance as perf
from Utils import labels_to_onehot, sample
from copy import deepcopy
#import Eval_MLP as NN
import Eval_linear as liblinear
import Eval_Config
cfg = conf.Config()
#Code structure inspired from Stanford's cs224d assignment starter codes
#class DNN(Model):
class RNNLM_v1(object):
def __init__(self, config):
self.config = config
# Generate placeholders for the inputs and labels.
self.load_data()
self.add_placeholders()
#self.add_metrics()
# Build model
self.arch = self.add_network(config)
self.inputs = self.arch.embedding(self.data_placeholder)
self.rnn_outputs = self.arch.predict(self.inputs,self.keep_prob, self.seq_len)
self.outputs = self.arch.projection(self.rnn_outputs)
# casting to handle numerical stability
self.predictions_next = [tf.nn.softmax(tf.cast(o, 'float64')) for o in self.outputs[0]]
# Reshape the output into len(vocab) sized chunks - the -1 says as many as
# needed to evenly divide
output_next = tf.reshape(tf.concat(1, self.outputs[0]), [-1, self.config.data_sets._len_vocab])
#output_label = tf.reshape(tf.concat(1, self.outputs[1]), [-1, self.config.data_sets._len_labels])
output_label = self.outputs[1]
self.loss = self.arch.loss([output_next, output_label], self.label_placeholder, self.label_2_placeholder, self.inputs, self.data_placeholder)
self.optimizer = self.config.solver._parameters['optimizer']
self.train = self.arch.training(self.loss,self.optimizer)
self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
self.summary = tf.summary.merge_all()
self.step_incr_op = self.arch.global_step.assign(self.arch.global_step+1)
#local variable initialization required for metrics operation, otherwise throws error
# self.init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
self.init = tf.global_variables_initializer()#tf.initialize_all_variables()
def predict_results(self,sess, all_labels, return_labels = False):
labels_orig, data = [], []
for k,v in all_labels.items():
labels_orig.append(v)
data.append([k])
#Replicate data on 2nd axis to meet the dimensions of data placeholder
#But since dynamic RNNs are used, only lengths of 'seq_length' are evaluated :)
data = np.tile(data, (1, self.config.num_steps))
feed_dict = {self.data_placeholder: data, self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval(), self.seq_len: [1]*len(data)}
labels_pred = sess.run(self.arch.label_sigmoid, feed_dict=feed_dict)[0]
if return_labels:
return labels_pred
else:
return perf.evaluate(labels_pred, labels_orig, 0)
def load_data(self):
# Get the 'encoded data'
self.data_sets = input_data.read_data_sets(self.config)
debug = self.config.debug
if debug:
print('##############--------- Debug mode ')
num_debug = (self.config.num_steps+1)*128
self.data_sets.train._x = self.data_sets.train._x[:num_debug]
self.data_sets.validation._x = self.data_sets.validation._x[:num_debug]
#self.data_sets.test_x = self.data_sets.test_x[:num_debug]
self.config.data_sets._len_vocab = self.data_sets.train.vocab.__len__()
l = len(list(self.data_sets.train.labels.values())[0])
self.config.data_sets._len_labels= l
print('--------- Project Path: '+self.config.codebase_root_path+self.config.project_name)
print('--------- Vocabulary Length: '+str(self.config.data_sets._len_vocab))
print('--------- Label Length: '+str(self.config.data_sets._len_labels))
print('--------- No. of Labelled nodes: ' + str(len(self.data_sets.train.labels.keys())))
def add_placeholders(self):
self.data_placeholder = tf.placeholder(tf.int32,shape=[None,self.config.num_steps], name='Input')
self.label_placeholder = tf.placeholder(tf.int32,name='Target')
self.label_2_placeholder = tf.placeholder(tf.int32,name='Target_label')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.seq_len = tf.placeholder(tf.int32, shape=[None], name='Seq_len')
#self.metrics = tf.placeholder(tf.float32,shape=(len(self.config.metrics),))
def create_feed_dict(self, input_batch, label_batch, label_batch_2, seq_len):
feed_dict = {
self.data_placeholder: input_batch,
self.label_placeholder: label_batch,
self.label_2_placeholder: label_batch_2,
self.seq_len: seq_len
}
return feed_dict
def add_network(self, config):
return architecture.Network(config)
def add_metrics(self, metrics):
"""assign and add summary to a metric tensor"""
for i,metric in enumerate(self.config.metrics):
tf.summary.scalar(metric, metrics[i])
def add_summaries(self,sess):
# Instantiate a SummaryWriter to output summaries and the Graph.
self.summary_writer_train = tf.train.SummaryWriter(self.config.logs_dir+"train", sess.graph)
self.summary_writer_val = tf.train.SummaryWriter(self.config.logs_dir+"val", sess.graph)
def write_summary(self,sess,summary_writer, metric_values, step, feed_dict):
summary = self.summary.merged_summary
#feed_dict[self.loss]=loss
feed_dict[self.metrics]=metric_values
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
def run_epoch(self, sess, dataset, train_op=None, summary_writer=None,verbose=1000):
if not train_op :
train_op = tf.no_op()
keep_prob = 1
else:
keep_prob = self.config.architecture._dropout
# And then after everything is built, start the training loop.
total_loss = []
next_loss = []
label_loss = []
sim_loss = []
emb_loss = []
grads = []
f1_micro, f1_macro = [], []
total_steps = sum(1 for x in dataset.next_batch(self.config.batch_size,self.config.num_steps))
#Sets the state to zero for a new epoch
state = self.arch.initial_state.eval()
for step, (input_batch, label_batch, label_batch_2, seq_len) in enumerate(
dataset.next_batch(self.config.batch_size,self.config.num_steps)):
#print("\n\n\nActualLabelCount: ", input_batch, label_batch, label_batch_2, seq_len, np.sum(label_batch_2, axis=2))
feed_dict = self.create_feed_dict(input_batch, label_batch, label_batch_2, seq_len)
feed_dict[self.keep_prob] = keep_prob
#Sets the initial_state temporarily to the previous final state for the session "AWESOME" -- verified
#feed_dict[self.arch.initial_state] = state
#Writes loss summary @last step of the epoch
if (step+1) < total_steps:
_, loss_value, state, pred_labels = sess.run([train_op, self.loss, self.arch.final_state, self.arch.label_sigmoid], feed_dict=feed_dict)
else:
_, loss_value, state, summary, pred_labels = sess.run([train_op, self.loss, self.arch.final_state,self.summary,self.arch.label_sigmoid], feed_dict=feed_dict)
if summary_writer != None:
summary_writer.add_summary(summary,self.arch.global_step.eval(session=sess))
summary_writer.flush()
#print(loss_value)
total_loss.append(loss_value[0])
next_loss.append(loss_value[1])
label_loss.append(loss_value[2])
sim_loss.append(loss_value[3])
emb_loss.append(loss_value[4])
#print(loss_value[5])
grads.append(np.mean(loss_value[5][0]))
#print("\n\n\nPredLabels:", pred_labels)
if verbose and step % verbose == 0:
metrics = [0]*20
if self.config.solver._curr_label_loss:
# metrics = perf.evaluate(pred_labels, label_batch_2, 0)
metrics = self.predict_results(sess, dataset.labels)
self.add_metrics(metrics)
f1_micro.append(metrics[3])
f1_macro.append(metrics[4])
print('%d/%d : pp = %0.3f : next = %0.3f : label = %0.3f : micro-F1 = %0.3f : macro-F1 = %0.3f : sim = %0.3f : emb = %0.3f : grads = %0.12f'%(step, total_steps, np.exp(np.mean(total_loss)), np.mean(next_loss), np.mean(label_loss), np.mean(f1_micro), np.mean(f1_macro), np.mean(sim_loss), np.mean(emb_loss), np.mean(grads)), end="\r")
sys.stdout.flush()
if verbose:
sys.stdout.write('\r')
return np.exp(np.mean(total_loss)),np.mean(total_loss), np.mean(f1_micro), np.mean(f1_macro)
def fit(self, sess):
#define parameters for early stopping
max_epochs = self.config.max_epochs
patience = self.config.patience # look at this many examples regardless
patience_increase = self.config.patience_increase # wait this much longer when a new best is found
improvement_threshold = self.config.improvement_threshold # a relative improvement of this much is
# considered significant
# go through this many minibatches before checking the network on the validation set
# Here we check every epoch
validation_loss = 1e6
done_looping = False
step = 1
best_step = -1
losses = []
learning_rate = self.config.solver._parameters['learning_rate']
#sess.run(self.init) #DO NOT DO THIS!! Doesn't restart from checkpoint
while (step <= self.config.max_epochs) and (not done_looping):
#print 'Epoch {}'.format(epoch)
#step_incr_op = tf.assign_add(self.global_step,1)
sess.run([self.step_incr_op])
epoch = self.arch.global_step.eval(session=sess)
start_time = time.time()
tr_pp, average_loss, tr_micro, tr_macro = self.run_epoch(sess,self.data_sets.train,train_op=self.train,summary_writer=self.summary_writer_train)
duration = time.time() - start_time
if (epoch % self.config.val_epochs_freq == 0):
val_pp,val_loss, val_micro, val_macro = self.run_epoch(sess,self.data_sets.validation,summary_writer=self.summary_writer_val)
print('\nEpoch %d: tr_loss = %.2f, val_loss = %.2f || tr_pp = %.2f, val_pp = %.2f || tr_micro = %.2f, val_micro = %.2f || tr_macro = %.2f, val_macro = %.2f (%.3f sec)'
% (epoch, average_loss, val_loss, tr_pp, val_pp, tr_micro, val_micro, tr_macro, val_macro, duration))
# Save model only if the improvement is significant
if (val_loss < validation_loss * improvement_threshold) and (epoch > self.config.save_epochs_after):
patience = max(patience, epoch * patience_increase)
validation_loss = val_loss
checkpoint_file = self.config.ckpt_dir + 'checkpoint'
self.saver.save(sess, checkpoint_file, global_step=epoch)
best_step = epoch
patience = epoch + max(self.config.val_epochs_freq,self.config.patience_increase)
#print('best step %d'%(best_step))
elif val_loss > validation_loss * improvement_threshold:
patience = epoch - 1
else:
# Print status to stdout.
print('Epoch %d: loss = %.2f pp = %.2f (%.3f sec)' % (epoch, average_loss, tr_pp, duration))
if (patience <= epoch):
#config.val_epochs_freq = 2
learning_rate = learning_rate / 10
self.optimizer = tf.train.AdamOptimizer(learning_rate)
patience = epoch + max(self.config.val_epochs_freq,self.config.patience_increase)
print('--------- Learning rate dropped to: %f'%(learning_rate))
if learning_rate <= 0.0000001:
print('Stopping by patience method')
done_looping = True
losses.append(average_loss)
step += 1
return losses, best_step
def get_embedding(self,sess,data, layer = 0):
if layer == 0:
feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval()}
return sess.run(self.inputs,feed_dict=feed_dict)[0]
if layer == 1:
feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: self.arch.initial_state.eval(), self.seq_len:[1]}
return sess.run(self.rnn_outputs, feed_dict=feed_dict)[0]
else:
print("Undefined layer")
return
def get_hidden_state(self,sess,data,eos_embed=None):
if eos_embed is None:
eos_embed = self.arch.initial_state.eval()
feed_dict = {self.data_placeholder: [data], self.keep_prob: 1, self.arch.initial_state: eos_embed, self.seq_len:[1]}
return sess.run(self.rnn_outputs,feed_dict=feed_dict)[0]
def generate_text(self,session, starting_text='<eos>',stop_length=100, stop_tokens=None, temp=1.0 ):
"""Generate text from the model.
Args:
session: tf.Session() object
starting_text: Initial text passed to model.
Returns:
output: List of word idxs
"""
state = self.arch.initial_state.eval()
# Imagine tokens as a batch size of one, length of len(tokens[0])
tokens = [self.data_sets.train.vocab.encode(word) for word in starting_text.split()]
all_labels = []
for i in range(stop_length):
feed = {self.data_placeholder: [tokens[-1:]], self.arch.initial_state: state, self.keep_prob: 1}
state, y_pred, embed, pred_labels = session.run([self.arch.final_state, self.predictions_next[-1],self.inputs, self.arch.label_sigmoid], feed_dict=feed)
state = state[0]
all_labels.append(pred_labels[0][0]) #batch-0, seq number-0
next_word_idx = sample(y_pred[0], temperature=temp)
tokens.append(next_word_idx)
if stop_tokens and self.data_sets.train.vocab.decode(tokens[-1]) in stop_tokens:
break
output = [self.data_sets.train.vocab.decode(word_idx) for word_idx in tokens]
#Print out the next nodes and corresponding labels
#print("labels and nodes are both incremented by 1 as compared to original dataset")
#for step, labels in enumerate(all_labels):
# temp = []
# for idx, val in enumerate(labels):
# if val>0.25:
# temp.append(idx)
# print(output[step], ": ", temp)
return output
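# Note: the `sample` helper imported from Utils is not shown in this file. A
# common temperature-sampling implementation (an assumption about its
# behaviour, shown only for illustration) is roughly:
#
#     def sample(a, temperature=1.0):
#         a = np.log(a + 1e-10) / temperature      # rescale the distribution
#         a = np.exp(a) / np.sum(np.exp(a))        # renormalise to sum to 1
#         return np.argmax(np.random.multinomial(1, a, 1))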
#def generate_sentence(self,session,starting_text,temp):
def generate_sentence(self,session,*args, **kwargs):
"""Convenice to generate a sentence from the model."""
return self.generate_text(session, *args, stop_tokens=['<eos>'], **kwargs)
########END OF CLASS MODEL#############################################################################################################
def init_Model(config):
tf.reset_default_graph()
with tf.variable_scope('RNNLM',reuse=None) as scope:
model = RNNLM_v1(config)
tfconfig = tf.ConfigProto( allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sm = tf.train.SessionManager()
if config.retrain:
load_ckpt_dir = config.ckpt_dir
print('--------- Loading variables from checkpoint if available')
else:
load_ckpt_dir = ''
print('--------- Training from scratch')
sess = sm.prepare_session("", init_op=model.init, saver=model.saver, checkpoint_dir=load_ckpt_dir,config=tfconfig)
return model, sess
def train_DNNModel():
#global cfg
print('############## Training Module ')
config = deepcopy(cfg)
model,sess = init_Model(config)
with sess:
model.add_summaries(sess)
losses, best_step = model.fit(sess)
return losses
def test_DNNModel():
#global cfg
print('############## Test Module ')
config = deepcopy(cfg)
model,sess = init_Model(config)
with sess:
test_pp = model.run_epoch(sess,model.data_sets.validation)
print('=-=' * 5)
print('Test perplexity: {}'.format(test_pp))
print('=-=' * 5)
def interactive_generate_text_DNNModel():
#global cfg
print('############## Generate Text Module ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
with sess:
starting_text = '2'
while starting_text:
print(' '.join(model.generate_sentence(sess, starting_text=starting_text, temp=1.0)))
starting_text = input('> ')
def dump_generate_text_DNNModel():
global cfg
print('############## Generate sentences for all words in dictionary and Dump ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
num_sentences = 2
with sess:
ignore_list = ['0','<eos>','<unk>']
keys = [int(word) for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]
keys.sort()
vocab_len = len(keys)
f_id = open(config.dataset_name+'/_data.sentences','w')
for starting_text in keys:
for n in range(num_sentences):
words = model.generate_sentence(sess, starting_text=str(starting_text), temp=1.0)
f_id.write((' '.join(words[:-1])+'\n'))
def save_Embeddings_DNNModel():
#global cfg
print('############## Save Embeddings Module ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
with sess:
model.add_summaries(sess)
ignore_list = ['0','<eos>','<unk>']
keys = [int(word) for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]
keys.sort()
vocab_len = len(keys)
enc_words = np.array([model.data_sets.train.vocab.encode(str(word)) for word in keys])
#embed = np.zeros([vocab_len,model.config.mRNN._embed_size])
embed = np.zeros([vocab_len,model.config.mRNN._hidden_size])
#eos_embed = model.get_embedding(sess,['<eos>'])
eos_embed = model.get_hidden_state(sess,[model.data_sets.train.vocab.encode('<eos>')],None)
for i,word in enumerate(enc_words):
embed[i] = model.get_embedding(sess,[word],)
#embed[i] = model.get_hidden_state(sess,[word],eos_embed)
fn = config.embed_dir+config.dataset_name+'_data.embd'
np.savetxt(fn,embed, delimiter=',')
#np.savetxt(fn,normalize(embed,norm='l2',axis=1), delimiter=',')
print('--------- Embeddings are saved to '+fn)
def save_embed(path, embed): #UNUSED
f = open(path, 'w')
for idx, item in enumerate(embed):
f.write(str(idx))
for val in item:
f.write(' ' + str(val))
        f.write('\n')
f.close()
def visualize_Embeddings_DNNModel():
#global cfg
print('############## Visualize Embeddings Module ')
config = deepcopy(cfg)
tf.reset_default_graph()
sess = tf.Session()
fn = config.embed_dir+config.dataset_name+'_data.embd'
#fn = config.embed_dir+'karate_structure_features'
print('--------- Embeddings are loaded from dir: '+fn)
embed = np.loadtxt(fn,delimiter=',')
embed_var = tf.Variable(embed,name='embed_var')
init = tf.initialize_all_variables()
sess.run(init)
checkpoint_file = config.logs_dir, 'Embedding'
saver = tf.train.Saver({"embedding": embed_var},write_version=tf.train.SaverDef.V2)
fn = config.embed_dir+'embedding_ckpt'
saver.save(sess,fn, global_step=1)
print('--------- To Visualize Embeddings load tf:0.12v tensorboard in directory: '+fn)
def generate_and_reconstruct():
print('############## Reconstruct Text Module ')
config = deepcopy(cfg)
config.batch_size = config.num_steps = 1
model,sess = init_Model(config)
ignore_list = ['0','<eos>','<unk>']
keys = [word for word in model.data_sets.train.vocab.word_freq.keys() if word not in ignore_list]
nodes = len(keys)
#adj_mat = np.zeros((nodes, nodes), dtype=int)
adj_list = {}
walk_count = 10
with sess:
for idx, node in enumerate(keys):
if idx%100 == 0:
print("Reconstructing for node: ",idx)
for i in range(walk_count):
walk = model.generate_sentence(sess, starting_text=node, temp=1.0)
for n1, n2 in zip(walk[:-2], walk[1:-1]):
#Subtracting one to start node count from 0
n1, n2 = int(n1)-1, int(n2)-1
weight = adj_list.get((n1, n2), 0)
adj_list[(n1,n2)] = weight+1
#adj_mat[int(n1)-1][int(n2)-1] += 1
adj_mat = sps.lil_matrix((nodes, nodes))
for k, v in adj_list.items():
i,j = k
adj_mat[i,j] = v
#adj_mat = scipy.sparse.coo_matrix(adj_mat)
savemat(config.results_dir+'reconstructed_'+cfg.dataset_name, adj_mat)
print('------------ Reconstruction file saved: ', 'reconstructed_'+cfg.dataset_name )
def classify_and_save():
print('############## Classify and save Module ')
config = deepcopy(cfg)
fn = config.embed_dir+config.dataset_name+'_data.embd'
e_conf = Eval_Config.Config(config.dataset_name+'/', fn)
#NN.evaluate(e_conf)
liblinear.evaluate(e_conf)
print("------------ Results saved to: ", e_conf.results_folder)
def predict_and_save():
print('############## Save Label Prediction Module ')
config = deepcopy(cfg)
model,sess = init_Model(config)
vocab = model.data_sets.train.vocab
all_labels = loadmat(config.label_dir)['labels']
nodes = all_labels.shape[0]
all_labels = input_data.get_labels(all_labels, [True]*nodes, vocab)
pred_labels = model.predict_results(sess, all_labels, return_labels=True)
    ordered_labels = np.zeros(all_labels.shape)
#Re-order the predictions based on actual node number
#pred_labels are in order of keys sequence of all_labels
for idx, k in enumerate(all_labels.keys()):
ordered_labels[int(vocab.decode(k)) - 1] = pred_labels[idx]
#Ignore the first column of label prediction (It is used for marking <EOS> and unlabeled data)
ordered_labels = ordered_labels[:,1:]
fn = config.result_dir+config.dataset_name+'_predicted_labels.csv'
np.savetxt(fn, ordered_labels, delimiter=',')
def execute():
with tf.device('/gpu:0'):
err = train_DNNModel()
#test_DNNModel()
#interactive_generate_text_DNNModel()
save_Embeddings_DNNModel()
visualize_Embeddings_DNNModel()
#generate_and_reconstruct()
classify_and_save()
predict_and_save()
return err
if __name__ == "__main__":
#remove parameter dictionary
meta_param = {#('dataset_name',):['blogcatalog_ncc'],
#('solver', 'learning_rate'): [0.001],
#('retrain',): [False],
('debug',): [False],
('max_epochs',): [1000]
}
variations = len(meta_param[('debug',)])
#Make sure number of variants are equal
for k,v in meta_param.items():
assert len(v) == variations
for idx in range(variations):
for k,vals in meta_param.items():
x = cfg
if len(k) > 1:
x = getattr(x, k[0])
setattr(x, k[-1], vals[idx])
print(k[-1], vals[idx])
cfg.create(cfg.dataset_name)#"run-"+str(idx))
cfg.init2()
#All set... GO!
execute()
print('\n\n ===================== \n\n')
| mit |
vshtanko/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
meysam9323/Harmonic-Model | src/HM.py | 1 | 20916 | """
written by: Meysam Asgari 15/12/2013 ; CSLU / OHSU
## common speech processing functions
"""
import numpy as np , pdb ,os , struct
import scipy.spatial.distance as dist
from scipy import linalg as LA
from lib import *
# import matplotlib
# matplotlib.use("PDF")
# from matplotlib import mlab as mlab, pylab as plt
execfile('src/cfg.py')
def Bases (nbasis , FL):
I = nbasis # number of basis functions
seg = I-1
len_b = 2*seg + np.fix(FL/seg)*seg
len_w = len_b/seg
ham = np.hamming(2*len_w)
basis = np.zeros((len_b,I))
basis[0:len_w , 0] = ham[len_w :]
for i in range(I-2):
basis[i*len_w : i*len_w + 2*len_w ,i+1] = ham
basis[len_b-len_w : ,I-1] = ham[0:len_w]
bases = basis[0:int(FL),:]
return bases
def genPseudoInvMatsVarToFile( winLen ,Ainv=None , Fmin = F0_MIN , Fmax = F0_MAX , F0=None):
global TinvMat
TinvMat = TinvMatDir +'TinvMat_'+ TYPE_of_MODEL+'_'+str(fs)+'Hz'+'_'+str(winLen)+'FL'+'_'+'F0res'+str(F0_RES)+'Hz_'+'F0min'+str(F0_MIN)+'Hz_'+'F0max'+str(F0_MAX)+'Hz_'+'nH'+str(NUMBER_OF_HARMONICS)+'_'+WINDOW+'.bin'
if not os.path.isfile(TinvMat):
pInvMatsVar = []
Basis = Bases ( NUM_BASES_FUN , winLen)
z = np.ones((winLen, 1))
nH = NUMBER_OF_HARMONICS #number of Harmonics
I = len(Basis[0,:])
D1 = np.zeros((winLen,nH*I))
D2 = np.zeros((winLen,nH*I))
fzero= np.arange(Fmin, Fmax+1, F0_RES, dtype=float)
T_len = winLen*winLen
T_vec = np.empty((T_len*len(fzero) , ))
t1= np.arange(1, winLen+1, 1, dtype=float)
t1=t1/fs
t2=np.arange(0, nH+1, 1, dtype=float)
X1, Y1 = np.meshgrid(t2,t1)
if WINDOW == 'hanning':
win = np.hanning(winLen)
W = np.diag(win)
elif WINDOW == 'rect':
win = np.ones((winLen),)
W = np.diag(win)
        if F0 is not None:
            fzero = F0
            T_vec = np.empty((T_len*len(fzero) , ))
for q in range(len(fzero)):
omega=2*np.pi*fzero[q]
Cos=np.cos(omega*X1*Y1) # WIN_LEN x N_HRMNICS
Sin=np.sin(omega*X1*Y1) # WIN_LEN x N_HRMNICS
# Harmonics with time varying amplitudes, composed of I basis
for t in range(winLen):
for h in range(nH):
D1[t][h*I:(h+1)*I] = Cos[t,h] * Basis[t] #D1: M x (nH*I)
D2[t][h*I:(h+1)*I] = Sin[t,h] * Basis[t] #D1: M x (nH*I)
A = np.concatenate((z, np.concatenate((D1, D2), 1)), 1)
A = np.dot(W,A)
rc = np.max(np.shape(A))*np.max(np.linalg.svd(A)[1])*eps
PinvA = np.linalg.pinv(A, rcond=rc)
P = np.dot(A , np.linalg.pinv(A, rcond=rc))
T_vec [q*T_len : (q+1)*T_len] = P.flatten()
writeBin(TinvMat , T_vec)
def genPseudoInvMatsVar( winLen ,Ainv=None , Fmin = F0_MIN , Fmax = F0_MAX , F0=None):
pInvMatsVar = []
AInvMats = []
Basis = Bases ( NUM_BASES_FUN , winLen)
z = np.ones((winLen, 1))
nH = NUMBER_OF_HARMONICS #number of Harmonics
I = len(Basis[0,:])
D1 = np.zeros((winLen,nH*I))
D2 = np.zeros((winLen,nH*I))
fzero= np.arange(Fmin, Fmax+1, F0_RES, dtype=float)
t1= np.arange(1, winLen+1, 1, dtype=float)
t1=t1/fs
t2=np.arange(0, nH+1, 1, dtype=float)
X1, Y1 = np.meshgrid(t2,t1)
if WINDOW == 'hanning':
win = np.hanning(winLen)
W = np.diag(win)
elif WINDOW == 'rect':
win = np.ones((winLen),)
W = np.diag(win)
    if F0 is not None:
        fzero = F0
for q in range(len(fzero)):
omega=2*np.pi*fzero[q]
Cos=np.cos(omega*X1*Y1) # WIN_LEN x N_HRMNICS
Sin=np.sin(omega*X1*Y1) # WIN_LEN x N_HRMNICS
# Harmonics with time varying amplitudes, composed of I basis
for t in range(winLen):
for h in range(nH):
D1[t][h*I:(h+1)*I] = Cos[t,h] * Basis[t] #D1: M x (nH*I)
D2[t][h*I:(h+1)*I] = Sin[t,h] * Basis[t] #D1: M x (nH*I)
A = np.concatenate((z, np.concatenate((D1, D2), 1)), 1)
A = np.dot(W,A)
rc = np.max(np.shape(A))*np.max(np.linalg.svd(A)[1])*eps
PinvA = np.linalg.pinv(A, rcond=rc)
P = np.dot(A , np.linalg.pinv(A, rcond=rc))
pInvMatsVar.append(P)
AInvMats.append(PinvA)
if Ainv != None:
return pInvMatsVar , AInvMats
else: return pInvMatsVar
def genPseudoInvMats( winLen , Ainv=None , window=None , Fmin = F0_MIN , Fmax = F0_MAX):
'''Compute and cache pInvMats for all candidate pitch values'''
pInvMats = []
AInvMats = []
fzero= np.arange(Fmin, Fmax+1, F0_RES, dtype=float)
t1= np.arange(1, winLen+1, 1, dtype=float)
t1=t1/fs
t2=np.arange(0, NUMBER_OF_HARMONICS+1, 1, dtype=float)
X1, Y1 = np.meshgrid(t2,t1)
if WINDOW == 'hanning':
win = np.hanning(winLen)
W = np.diag(win)
elif WINDOW == 'rect':
win = np.ones((winLen),)
W = np.diag(win)
for q in range(len(fzero)):
omega=2*np.pi*fzero[q]
Cos=np.cos(omega*X1*Y1) # WIN_LEN x N_HRMNICS
Sin=np.sin(omega*X1*Y1) # WIN_LEN x N_HRMNICS
A=np.c_[Cos,Sin[:,1:]]
A = np.dot(W,A)
rc = np.max(np.shape(A))*np.max(np.linalg.svd(A)[1])*eps
PinvA = np.linalg.pinv(A, rcond=rc)
P = np.dot(A , np.linalg.pinv(A, rcond=rc))
pInvMats.append(P)
AInvMats.append(PinvA)
if Ainv != None:
return pInvMats , AInvMats
else: return pInvMats
def genPseudoInvMatsToFile( winLen , Fmin = F0_MIN , Fmax = F0_MAX):
'''Compute and cache pInvMats for all candidate pitch values'''
global TinvMat
TinvMat = TinvMatDir +'TinvMat_'+ TYPE_of_MODEL+'_'+str(fs)+'Hz'+'_'+str(winLen)+'FL'+'_'+'F0res'+str(F0_RES)+'Hz_'+'F0min'+str(F0_MIN)+'Hz_'+'F0max'+str(F0_MAX)+'Hz_'+'nH'+str(NUMBER_OF_HARMONICS)+'_'+WINDOW+'.bin'
if not os.path.isfile(TinvMat):
pInvMats = []
fzero= np.arange(Fmin, Fmax+1, F0_RES, dtype=float)
T_len = (2*NUMBER_OF_HARMONICS +1)*winLen
T_vec = np.empty((T_len*len(fzero) , ))
t1= np.arange(1, winLen+1, 1, dtype=float)
t1=t1/fs
t2=np.arange(0, NUMBER_OF_HARMONICS+1, 1, dtype=float)
X1, Y1 = np.meshgrid(t2,t1)
if WINDOW == 'hanning':
win = np.hanning(winLen)
W = np.diag(win)
elif WINDOW == 'rect':
win = np.ones((winLen),)
W = np.diag(win)
for q in range(len(fzero)):
omega=2*np.pi*fzero[q]
Cos=np.cos(omega*X1*Y1) # WIN_LEN x N_HRMNICS
Sin=np.sin(omega*X1*Y1) # WIN_LEN x N_HRMNICS
A=np.c_[Cos,Sin[:,1:]]
A = np.dot(W,A)
tmp = np.linalg.inv(LA.sqrtm( np.dot(A.T,A) ) )
T = np.dot( tmp , A.T).real # P = np.dot(T.T ,T)
T_vec [q*T_len : (q+1)*T_len] = T.flatten()
writeBin(TinvMat , T_vec)
def genPinvA( winLen , Fmin = F0_MIN , Fmax = F0_MAX):
AInvMats = []
fzero= np.arange(Fmin, Fmax+1, F0_RES, dtype=float)
t1= np.arange(1, winLen+1, 1, dtype=float)
t1=t1/fs
t2=np.arange(0, NUMBER_OF_HARMONICS+1, 1, dtype=float)
X1, Y1 = np.meshgrid(t2,t1)
win = np.hanning(winLen)
W = np.diag(win)
for q in range(len(fzero)):
omega=2*np.pi*fzero[q]
Cos=np.cos(omega*X1*Y1) # WIN_LEN x N_HRMNICS
Sin=np.sin(omega*X1*Y1) # WIN_LEN x N_HRMNICS
A=np.c_[Cos,Sin[:,1:]]
A = np.dot(W,A)
rc = np.max(np.shape(A))*np.max(np.linalg.svd(A)[1])*eps
PinvA = np.linalg.pinv(A, rcond=rc)
AInvMats.append(PinvA)
return AInvMats
def genPseudoInvMats_map( winLen , Lambda , Mean , Fmin = F0_MIN , Fmax = F0_MAX ):
'''Compute and cache pInvMats for all candidate pitch values'''
pInvMats = []
penalty = []
fzero= np.arange( Fmin, Fmax+1, F0_RES, dtype=float)
t1= np.arange(1, winLen+1, 1, dtype=float)
t1=t1/fs
t2=np.arange(0, NUMBER_OF_HARMONICS+1, 1, dtype=float)
X1, Y1 = np.meshgrid(t2,t1)
if WINDOW == 'hanning':
win = np.hanning(winLen)
W = np.diag(win)
elif WINDOW == 'rect':
win = np.ones((winLen),)
W = np.diag(win)
for q in range(len(fzero)):
omega=2*np.pi*fzero[q]
Cos=np.cos(omega*X1*Y1) # WIN_LEN x N_HRMNICS
Sin=np.sin(omega*X1*Y1) # WIN_LEN x N_HRMNICS
A=np.c_[Cos,Sin[:,1:]]
A = np.dot(W,A)
rc = np.max(np.shape(A))*np.max(np.linalg.svd(A)[1])*eps
d = np.dot (A , np.linalg.inv(np.dot(A.T,A)+Lambda) )
P = np.dot( d , A.T)
pen = np.dot( np.dot(d , Lambda) , Mean)
pInvMats.append(P)
penalty.append(pen)
#pdb.set_trace()
return pInvMats , penalty
def silDetection(Frames):
    pw = 10 * np.log10(np.mean(Frames * Frames, 1))
    if SIL_ENG:
        sil = pw < SIL_THRESHOLD  # silence detection
    else:
        sil = pw > 100
if ENTROPY :
entropy = entropyEstimation(Frames)
sil_entropy= entropy > ENTROPY_THR
sil = np.logical_or(sil,sil_entropy)
no_sil = np.logical_not(sil) # not silence (either voiced or unvoiced or even noise)
return np.where(sil==True)[0], np.where(no_sil==True)[0]
def getf0Features( frames , Fs , Fmin = F0_MIN , Fmax = F0_MAX , voiceProb= None , PitchDetction= None):
global fs
fs = Fs
nframe , FL = frames.shape
sil_ind , sp_ind = silDetection(frames)
noise_var , noise_var_min = NoiseVarianceEstimation(frames[sp_ind])
LL = -FL/2*np.log(noise_var)
#hard_pitch = F0_MIN + (np.argmax(LL,0))*F0_RES
#######################################################
if PitchDetction :
pitch = np.zeros(( nframe ))
pitch[sp_ind] = F0_MIN + F0_RES * viterbi(LL, genTransMat(SIGMA_TRANS \
, Fmin = F0_MIN , Fmax = F0_MAX ) , ret='pitch')
######################################################
vuvll = np.zeros((2,len(sp_ind)))
E_Y = np.sum(frames[sp_ind] ** 2, 1)
vuvll[0] = np.log(E_Y - noise_var_min ) # Reconsrtucted signal Energy (voiced state)
vuvll[1] = np.log( noise_var_min ) # Noise Energy ( unvoiced state)
vuvll[1] = vuvll[1,:] + VU_THRESHOLD
vuv = viterbi(vuvll, np.log(VUV_TRANSITION) , ret='vuv')
vuv1 = findUVerr(vuv) # smoothing
vIdx = np.nonzero(vuv == 1)[0]
uvIdx = np.nonzero(vuv == 0)[0]
vuv = np.zeros(( nframe ))
vuv[sp_ind] = vuv1
if voiceProb :
probvoice = vuvll[0,:]-vuvll[1,:] - VU_THRESHOLD
probvoice = featNormalize(probvoice,0.05,1)
prob_voice = np.zeros(( nframe ))
prob_voice[sp_ind] = probvoice
if np.all(np.isnan(prob_voice)):
print 'Nan in voicing probability '
if PitchDetction != None and voiceProb != None:
pitch[uvIdx]=0
return pitch , prob_voice , vuv
elif PitchDetction != None:
pitch[uvIdx]=0
return pitch , vuv
elif voiceProb != None:
return prob_voice , vuv
else: return vuv
def thr_adaptation(snr):
clean_thr = -.3
somewhat_noisy_thr = -.8
Noisy_thr = -1.2
veryNoisy_thr = -1.4
if snr <= .3 : THR = veryNoisy_thr
elif snr > .3 and snr <= .5 : THR = Noisy_thr
elif snr > .5 and snr <= 1 : THR = somewhat_noisy_thr
else: THR = clean_thr
return THR
def genHarCofStat(HarCoff):
#pdb.set_trace()
Mean = np.mean(HarCoff,axis =0)
b_Sigma = np.dot((HarCoff-Mean).T,(HarCoff-Mean))/(np.shape(HarCoff)[0]-1)
b_inv_Sig = np.linalg.inv(b_Sigma)
#Lambda_HM = b_inv_Sig*np.eye(len(b_inv_Sig))# why is not full cov?
return Mean , b_inv_Sig
def NoiseVarianceEstimation_map ( Frames , pInvMats , penalty, F_min=F0_MIN , F_max=F0_MAX ):
FL , nframe= np.shape(Frames)
pw = np.sum(Frames * Frames, 0)
cands = np.arange(F_min, F_max+1, F0_RES, dtype=float)
var = np.zeros((len(cands),nframe))
pitch = np.zeros((nframe, ))
for q in range(len(cands)):
Py = np.dot(pInvMats[q],Frames)
gamma = penalty[q]
for i in range(nframe):
rec_energy = np.dot( (Py[:,i]+gamma).T , (Py[:,i]+gamma) )
var[q][i] = ( pw[i] - rec_energy)
sigma = np.min( var , 0 )
# Convoloution of the LL
cll_conv = np.zeros((len(cands),nframe))
Wnd = np.hamming(HAM_WND)
cll = var
half_wnd = len(Wnd)/2-1
for i in range(nframe):
Conv = np.convolve(cll[:,i],Wnd,'valid')
tmp = np.ones(len(cands),)*np.max(Conv)
tmp[half_wnd : half_wnd + len(Conv)] = Conv
cll_conv[:,i] = np.sqrt(np.dot(cll[:,i],cll[:,i].T)/(np.dot(tmp,tmp.T)+eps))*tmp
return cll_conv , sigma
def NoiseVarianceEstimation ( Frames , F_min=F0_MIN , F_max=F0_MAX ):
    nframe , FL = np.shape(Frames)
if TYPE_of_MODEL == 'HM': genPseudoInvMatsToFile( FL , Fmin = F0_MIN , Fmax = F0_MAX)
elif TYPE_of_MODEL =='TVHM' : genPseudoInvMatsVarToFile( FL , Fmin = F0_MIN , Fmax = F0_MAX)
    else: raise StandardError, "The model type has to be either HM or TVHM"
pw = np.sum(Frames * Frames, 1)
Wnd = np.hamming(HAM_WND)
half_wnd = len(Wnd)/2-1
cands = np.arange(F_min, F_max+1, F0_RES, dtype=float)
var = np.zeros((len(cands),nframe))
Tinv = readPinvMat ( TinvMat )
if TYPE_of_MODEL == 'HM':
for q in range(len(cands)):
Tmat = Tinv[q,:,:] #(2*nH+1)*FL the T matrix is loaded
for i in xrange(nframe):
rec = np.dot(Tmat ,Frames[i,:])
var[q][i] = pw[i] - np.dot(rec.T,rec)
elif TYPE_of_MODEL == 'TVHM':
for q in range(len(cands)):
P = Tinv[q,:,:]
#Py = np.dot(Tinv[q,:,:],Frames.T) # the P matrix is loaded
for i in range(nframe):
#var[q][i] = ( pw[i] - np.dot(Frames[i,:].T ,Py[:,i]) )
frm = Frames[i,:]
var[q][i] = ( np.dot(frm.T,frm) - np.dot(frm.T , np.dot(P,frm)) )
#### conv ####
cll_conv = np.zeros((len(cands),nframe))
Wnd = np.hamming(HAM_WND)
cll = var
half_wnd = len(Wnd)/2-1
for i in range(nframe):
Conv = np.convolve(cll[:,i],Wnd,'valid')
tmp = np.ones(len(cands),)*np.max(Conv)
tmp[half_wnd : half_wnd + len(Conv)] = Conv
cll_conv[:,i] = np.sqrt(np.dot(cll[:,i],cll[:,i].T)/(eps+np.dot(tmp,tmp.T)))*tmp
sigma = np.min( var , 0 )
return cll_conv , sigma
def getf0_MAP( frames , Fs , Fmin = F0_MIN , Fmax = F0_MAX , voiceProb= False , PitchDetction= False):
global fs
fs = Fs
nframe , FL = frames.shape
sil_ind , sp_ind = silDetection(frames)
noise_var , noise_var_min = NoiseVarianceEstimation(frames[sp_ind])
noise_variance = np.mean(noise_var_min)
LL = -FL/2*np.log(noise_var)
pitch_idx = viterbi(LL, genTransMat(SIGMA_TRANS , Fmin = F0_MIN , Fmax = F0_MAX ) , ret='pitch')
HarCof = HARMONIC_COFF ( frames[sp_ind] , pitch_idx )
harCofMean , harCovInvMat = genHarCofStat(HarCof.T)
Lambda = noise_variance * harCovInvMat
pInvMats , Gamma = genPseudoInvMats_map( FL , Lambda , harCofMean )
noise_var , noise_var_min = NoiseVarianceEstimation_map ( frames[sp_ind].T, pInvMats , Gamma , F_min = Fmin , F_max = Fmax )
#pdb.set_trace()
LL = -FL/2*np.log(noise_var)
if PitchDetction :
pitch = np.zeros(( nframe ))
pitch[sp_ind] = F0_MIN + F0_RES * viterbi(LL, genTransMat(SIGMA_TRANS \
, Fmin = F0_MIN , Fmax = F0_MAX ) , ret='pitch')
######################################################
vuvll = np.zeros((2,len(sp_ind)))
E_Y = np.sum(frames[sp_ind] ** 2, 1)
vuvll[0] = np.log(E_Y - noise_var_min ) # Reconsrtucted signal Energy (voiced state)
vuvll[1] = np.log( noise_var_min ) # Noise Energy ( unvoiced state)
vuvll[1] = vuvll[1,:] + VU_THRESHOLD
vuv = viterbi(vuvll, np.log(VUV_TRANSITION) , ret='vuv')
vuv1 = findUVerr(vuv) # smoothing
vIdx = np.nonzero(vuv == 1)[0]
uvIdx = np.nonzero(vuv == 0)[0]
vuv = np.zeros(( nframe ))
vuv[sp_ind] = vuv1
if voiceProb :
probvoice = vuvll[0,:]-vuvll[1,:] - VU_THRESHOLD
probvoice = featNormalize(probvoice,0.05,1)
prob_voice = np.zeros(( nframe ))
prob_voice[sp_ind] = probvoice
if np.all(np.isnan(prob_voice)):
print 'Nan in voicing probability '
    if PitchDetction and voiceProb:
        pitch[uvIdx]=0
        return pitch , prob_voice , vuv
    elif PitchDetction:
        pitch[uvIdx]=0
        return pitch , vuv
    elif voiceProb:
        return prob_voice , vuv
    else: return vuv
def f0Estimation ( var ):
FL , nframe = np.shape(var)
f0 = np.zeros((nframe,))
#pdb.set_trace()
for frm in range(nframe):
vec = -var[:,frm]
f0[frm] =LobeAreaEstimate(vec) #+ F0_MIN
return f0
def HARMONIC_COFF ( Frames , pitch_idx ):
nframe , FL = np.shape(Frames)
PinvA = genPinvA( FL)
harCoeff = np.zeros((PinvA[0].shape[0] , nframe))
for i in range(nframe):
harCoeff[:,i] = np.dot (PinvA[int(pitch_idx[i])] , Frames[i,:]+eps )
return harCoeff
def getf0_unvoiced ( frames ):
nFrame , FL = frames.shape
var , var_min = NoiseVarianceEstimation ( frames , F_min = F0_MIN , F_max = F0_MAX )
I_max = np.argmin(var,0)
return I_max
def viterbi(obs, trans , ret='vuv'):
m, n = np.shape(obs)
states = np.zeros((m, n+1))
    backtrace = np.zeros((m, n+1), dtype=int)
for i in range(1, n+1):
for j in range(m):
delta = states[:, i-1] + trans[:, j]
backtrace[j, i] = np.argmax(delta)
states[j, i] = delta[backtrace[j, i]] + obs[j,i-1]
    bestPath = np.zeros(n, dtype=int)
VUv_lable = np.zeros(n)
bestPath[n-1] = np.argmax(states[:, n] )
for i in range(n-2, -1, -1):
bestPath[i] = backtrace[bestPath[i+1], i+2]
out = bestPath
if ret == 'vuv':
for i in range (n):
if bestPath[i] < 1 :
VUv_lable[i] = 1
else:
VUv_lable[i] = 0
out = VUv_lable
return out
def genTransMat(sig_tran , Fmin = F0_MIN , Fmax = F0_MAX):
fzero = np.arange(Fmin, Fmax+1, F0_RES, dtype=float)
nf0 = len(fzero)
pTrans = np.zeros((nf0, nf0))
for i in range(nf0):
for j in range(nf0):
pTrans[i][j] = -.5*np.log(2.0*np.pi*sig_tran*sig_tran) - 1/(2.0*sig_tran*sig_tran)* (fzero[i] - fzero[j])**2
return pTrans
def writeBin( fn , vec , type = 'f'):
os.system('mkdir -p '+fn.split(fn.split('/')[-1])[0])
f = open(fn, 'wb')
for i in xrange(len(vec)):
f.write(struct.pack( type , vec[i] ))
f.close()
def readBin ( fn , fmt = 'f'):
fp = open(fn, 'rb')
wav = fp.read()
sampwidth = 4
fmt = 'f'
wav = struct.unpack('<%d%s' %(len(wav) / sampwidth, fmt), wav)
return wav
def readPinvMat ( fname , Fmin = F0_MIN , Fmax = F0_MAX, F0_res = F0_RES ):
f0range = len(np.arange(Fmin, Fmax+1, F0_res))
FL = int(round(DEFAULT_FRAME_DUR * fs))
float_vec = readBin ( fname , fmt = 'f')
if TYPE_of_MODEL == 'HM': pInvMats = np.resize(float_vec , (f0range , 2*NUMBER_OF_HARMONICS +1 , FL))
elif TYPE_of_MODEL =='TVHM' : pInvMats = np.resize(float_vec , (f0range , FL , FL))
    else: raise StandardError, "The model type has to be either HM or TVHM"
return pInvMats
def findUVerr(vec,target=np.expand_dims(np.array([1,1,0,1,1]),1)):
ref_len = len(target)
vec_len = len(vec)
d = []
for f in xrange(vec_len - ref_len):
if np.dot(vec[f:ref_len+f].T , target ) == 4 and vec[f:ref_len+f][2] == 0:
d.append(f+ref_len/2)
if len(d)!=0:
vec[np.array(d,int)] = 1
return vec
def fileNameExt( name ):
name = name.strip()
tmp = name.split('/')
return name[len(name)-len(tmp[-1]):].strip()
def fileDirExt (name):
name = name.strip()
tmp = name.split('/')
return name[0:len(name) - len(tmp[-1])]
| mit |
Unidata/MetPy | v0.11/_downloads/fdf887129bd8090f4be24127e43a5148/xarray_tutorial.py | 1 | 14844 | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
xarray with MetPy Tutorial
==========================
`xarray <http://xarray.pydata.org/>`_ is a powerful Python package that provides N-dimensional
labeled arrays and datasets following the Common Data Model. While the process of integrating
xarray features into MetPy is ongoing, this tutorial demonstrates how xarray can be used
within the current version of MetPy. MetPy's integration primarily works through accessors
which allow simplified projection handling and coordinate identification. Unit and calculation
support is currently available in a limited fashion, but should be improved in future
versions.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Any import of metpy will activate the accessors
import metpy.calc as mpcalc
from metpy.testing import get_test_data
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# While xarray can handle a wide variety of n-dimensional data (essentially anything that can
# be stored in a netCDF file), a common use case is working with model output. Such model
# data can be obtained from a THREDDS Data Server using the siphon package, but for this
# tutorial, we will use an example subset of GFS data from Hurricane Irma (September 5th,
# 2017).
# Open the netCDF file as a xarray Dataset
data = xr.open_dataset(get_test_data('irma_gfs_example.nc', False))
# View a summary of the Dataset
print(data)
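#########################################################################
# As an aside, a similar GFS subset could be pulled live from a THREDDS Data
# Server with siphon's NCSS client. The snippet below is only an illustrative
# sketch (the catalog URL, dataset index, and query values are hypothetical
# placeholders, not part of this example), so it is shown as a non-executed
# literal block:
#
# ::
#
#     from datetime import datetime
#     from siphon.catalog import TDSCatalog
#
#     cat = TDSCatalog('https://thredds.example.com/catalog/grib/GFS/catalog.xml')
#     ncss = cat.datasets[0].subset()
#     query = ncss.query()
#     query.variables('Temperature_isobaric')
#     query.lonlat_box(north=50, south=10, east=-40, west=-100)
#     query.time(datetime(2017, 9, 5, 12)).accept('netcdf4')
#     nc = ncss.get_data(query)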
#########################################################################
# Preparing Data
# --------------
#
# To make use of the data within MetPy, we need to parse the dataset for projection
# information following the CF conventions. For this, we use the
# ``data.metpy.parse_cf()`` method, which will return a new, parsed ``DataArray`` or
# ``Dataset``.
#
# Additionally, we rename our data variables for easier reference.
# To parse the full dataset, we can call parse_cf without an argument, and assign the returned
# Dataset.
data = data.metpy.parse_cf()
# If we instead want just a single variable, we can pass that variable name to parse_cf and
# it will return just that data variable as a DataArray.
data_var = data.metpy.parse_cf('Temperature_isobaric')
# If we want only a subset of variables, we can pass a list of variable names as well.
data_subset = data.metpy.parse_cf(['u-component_of_wind_isobaric',
'v-component_of_wind_isobaric'])
# To rename variables, supply a dictionary between old and new names to the rename method
data = data.rename({
'Vertical_velocity_pressure_isobaric': 'omega',
'Relative_humidity_isobaric': 'relative_humidity',
'Temperature_isobaric': 'temperature',
'u-component_of_wind_isobaric': 'u',
'v-component_of_wind_isobaric': 'v',
'Geopotential_height_isobaric': 'height'
})
#########################################################################
# Units
# -----
#
# MetPy's DataArray accessor has a ``unit_array`` property to obtain a ``pint.Quantity`` array
# of just the data from the DataArray (metadata is removed) and a ``convert_units`` method to
# convert the data from one unit to another (keeping it as a DataArray). For now, we'll
# just use ``convert_units`` to convert our temperature to ``degC``.
data['temperature'].metpy.convert_units('degC')
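#########################################################################
# The ``unit_array`` property mentioned above works the same way; it simply hands
# back the underlying data as a `pint.Quantity` (a quick check, using nothing
# beyond the accessor already introduced):
temperature_quantity = data['temperature'].metpy.unit_array
print(type(temperature_quantity))
print(temperature_quantity.units)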
#########################################################################
# Coordinates
# -----------
#
# You may have noticed how we directly accessed the vertical coordinates above using their
# names. However, in general, if we are working with a particular DataArray, we don't have to
# worry about that since MetPy is able to parse the coordinates and so obtain a particular
# coordinate type directly. There are two ways to do this:
#
# 1. Use the ``data_var.metpy.coordinates`` method
# 2. Use the ``data_var.metpy.x``, ``data_var.metpy.y``, ``data_var.metpy.vertical``,
# ``data_var.metpy.time`` properties
#
# The valid coordinate types are:
#
# - x
# - y
# - vertical
# - time
#
# (Both approaches and all four types are shown below)
# Get multiple coordinates (for example, in just the x and y direction)
x, y = data['temperature'].metpy.coordinates('x', 'y')
# If we want to get just a single coordinate from the coordinates method, we have to use
# tuple unpacking because the coordinates method returns a generator
vertical, = data['temperature'].metpy.coordinates('vertical')
# Or, we can just get a coordinate from the property
time = data['temperature'].metpy.time
# To verify, we can inspect all their names
print([coord.name for coord in (x, y, vertical, time)])
#########################################################################
# Indexing and Selecting Data
# ---------------------------
#
# MetPy provides wrappers for the usual xarray indexing and selection routines that can handle
# quantities with units. For DataArrays, MetPy also allows using the coordinate axis types
# mentioned above as aliases for the coordinates. And so, if we wanted 850 hPa heights,
# we would take:
print(data['height'].metpy.sel(vertical=850 * units.hPa))
#########################################################################
# For full details on xarray indexing/selection, see
# `xarray's documentation <http://xarray.pydata.org/en/stable/indexing.html>`_.
#########################################################################
# Projections
# -----------
#
# Getting the cartopy coordinate reference system (CRS) of the projection of a DataArray is as
# straightforward as using the ``data_var.metpy.cartopy_crs`` property:
data_crs = data['temperature'].metpy.cartopy_crs
print(data_crs)
#########################################################################
# The cartopy ``Globe`` can similarly be accessed via the ``data_var.metpy.cartopy_globe``
# property:
data_globe = data['temperature'].metpy.cartopy_globe
print(data_globe)
#########################################################################
# Calculations
# ------------
#
# Most of the calculations in `metpy.calc` will accept DataArrays by converting them
# into their corresponding unit arrays. While this may often work without any issues, we must
# keep in mind that because the calculations are working with unit arrays and not DataArrays:
#
# - The calculations will return unit arrays rather than DataArrays
# - Broadcasting must be taken care of outside of the calculation, as it would only recognize
# dimensions by order, not name
#
# As an example, we calculate geostrophic wind at 500 hPa below:
lat, lon = xr.broadcast(y, x)
f = mpcalc.coriolis_parameter(lat)
dx, dy = mpcalc.lat_lon_grid_deltas(lon, lat, initstring=data_crs.proj4_init)
heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]
u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
print(u_geo)
print(v_geo)
#########################################################################
# Also, a limited number of calculations directly support xarray DataArrays or Datasets (they
# can accept *and* return xarray objects). Right now, this includes
#
# - Derivative functions
# - ``first_derivative``
# - ``second_derivative``
# - ``gradient``
# - ``laplacian``
# - Cross-section functions
# - ``cross_section_components``
# - ``normal_component``
# - ``tangential_component``
# - ``absolute_momentum``
#
# More details can be found by looking at the documentation for the specific function of
# interest.
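#########################################################################
# As a quick illustration of the first group, a derivative function can be
# applied to a DataArray directly. This is only a sketch: it assumes the
# DataArray-aware form of ``first_derivative`` accepts the axis by name via the
# ``axis`` keyword, so check the function documentation for the exact signature:
#
# ::
#
#     dhdx = mpcalc.first_derivative(heights, axis='x')
#     print(dhdx)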
#########################################################################
# There is also the special case of the helper function, ``grid_deltas_from_dataarray``, which
# takes a ``DataArray`` input, but returns unit arrays for use in other calculations. We could
# rewrite the above geostrophic wind example using this helper function as follows:
heights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]
lat, lon = xr.broadcast(y, x)
f = mpcalc.coriolis_parameter(lat)
dx, dy = mpcalc.grid_deltas_from_dataarray(heights)
u_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)
print(u_geo)
print(v_geo)
#########################################################################
# Plotting
# --------
#
# Like most meteorological data, we want to be able to plot these data. DataArrays can be used
# like normal numpy arrays in plotting code, which is the recommended process at the current
# point in time, or we can use some of xarray's plotting functionality for quick inspection of
# the data.
#
# (More detail beyond the following can be found at `xarray's plotting reference
# <http://xarray.pydata.org/en/stable/plotting.html>`_.)
# A very simple example of a plot of 500 hPa heights
data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}].plot()
plt.show()
#########################################################################
# Let's add a projection and coastlines to it
ax = plt.axes(projection=ccrs.LambertConformal())
data['height'].metpy.loc[{'time': time[0],
'vertical': 500. * units.hPa}].plot(ax=ax, transform=data_crs)
ax.coastlines()
plt.show()
#########################################################################
# Or, let's make a full 500 hPa map with heights, temperature, winds, and humidity
# Select the data for this time and level
data_level = data.metpy.loc[{time.name: time[0], vertical.name: 500. * units.hPa}]
# Create the matplotlib figure and axis
fig, ax = plt.subplots(1, 1, figsize=(12, 8), subplot_kw={'projection': data_crs})
# Plot RH as filled contours
rh = ax.contourf(x, y, data_level['relative_humidity'], levels=[70, 80, 90, 100],
colors=['#99ff00', '#00ff00', '#00cc00'])
# Plot wind barbs, but not all of them
wind_slice = slice(5, -5, 5)
ax.barbs(x[wind_slice], y[wind_slice],
data_level['u'].metpy.unit_array[wind_slice, wind_slice].to('knots'),
data_level['v'].metpy.unit_array[wind_slice, wind_slice].to('knots'),
length=6)
# Plot heights and temperature as contours
h_contour = ax.contour(x, y, data_level['height'], colors='k', levels=range(5400, 6000, 60))
h_contour.clabel(fontsize=8, colors='k', inline=1, inline_spacing=8,
fmt='%i', rightside_up=True, use_clabeltext=True)
t_contour = ax.contour(x, y, data_level['temperature'], colors='xkcd:deep blue',
levels=range(-26, 4, 2), alpha=0.8, linestyles='--')
t_contour.clabel(fontsize=8, colors='xkcd:deep blue', inline=1, inline_spacing=8,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Add geographic features
ax.add_feature(cfeature.LAND.with_scale('50m'), facecolor=cfeature.COLORS['land'])
ax.add_feature(cfeature.OCEAN.with_scale('50m'), facecolor=cfeature.COLORS['water'])
ax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='#c7c783', zorder=0)
ax.add_feature(cfeature.LAKES.with_scale('50m'), facecolor=cfeature.COLORS['water'],
edgecolor='#c7c783', zorder=0)
# Set a title and show the plot
ax.set_title('500 hPa Heights (m), Temperature (\u00B0C), Humidity (%) at '
+ time[0].dt.strftime('%Y-%m-%d %H:%MZ').item())
plt.show()
#########################################################################
# What Could Go Wrong?
# --------------------
#
# Depending on your dataset and what you are trying to do, you might run into problems with
# xarray and MetPy. Below are examples of some of the most common issues
#
# - Multiple coordinate conflict
# - An axis not being available
# - An axis not being interpretable
# - Arrays not broadcasting in calculations
#
# **Coordinate Conflict**
#
# Code:
#
# ::
#
# x = data['Temperature'].metpy.x
#
# Error Message:
#
# ::
#
# /home/user/env/MetPy/metpy/xarray.py:305: UserWarning: More than
# one x coordinate present for variable "Temperature".
#
# Fix:
#
# Manually assign the coordinates using the ``assign_coordinates()`` method on your DataArray,
# or by specifying the ``coordinates`` argument to the ``parse_cf()`` method on your Dataset,
# to map the ``T`` (time), ``Z`` (vertical), ``Y``, and ``X`` axes (as applicable to your
# data) to the corresponding coordinates.
#
# ::
#
# data['Temperature'].assign_coordinates({'T': 'time', 'Z': 'isobaric',
# 'Y': 'y', 'X': 'x'})
# x = data['Temperature'].metpy.x
#
# or
#
# ::
#
# temperature = data.metpy.parse_cf('Temperature',
# coordinates={'T': 'time', 'Z': 'isobaric',
# 'Y': 'y', 'X': 'x'})
# x = temperature.metpy.x
#
# **Axis Unavailable**
#
# Code:
#
# ::
#
# data['Temperature'].metpy.vertical
#
# Error Message:
#
# ::
#
# AttributeError: vertical attribute is not available.
#
# This means that your data variable does not have the coordinate that was requested, at
# least as far as the parser can recognize. Verify that you are requesting a
# coordinate that your data actually has, and if it still is not available,
# you will need to manually specify the coordinates as discussed above.
#
# **Axis Not Interpretable**
#
# Code:
#
# ::
#
# x, y, ensemble = data['Temperature'].metpy.coordinates('x', 'y', 'ensemble')
#
# Error Message:
#
# ::
#
# AttributeError: 'ensemble' is not an interpretable axis
#
# This means that you are requesting a coordinate that MetPy is (currently) unable to parse.
# While this means it cannot be recognized automatically, you can still obtain your desired
# coordinate directly by accessing it by name. If you have a need for systematic
# identification of a new coordinate type, we welcome pull requests for such new functionality
# on GitHub!
#
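# For example, keeping the hypothetical 'ensemble' coordinate from the message
# above, plain xarray access by name still works:
#
# ::
#
#     ensemble = data['Temperature']['ensemble']
#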
# **Broadcasting in Calculations**
#
# Code:
#
# ::
#
# theta = mpcalc.potential_temperature(data['isobaric3'], data['temperature'])
#
# Error Message:
#
# ::
#
# ValueError: operands could not be broadcast together with shapes (9,31,81,131) (31,)
#
# This is a symptom of the incomplete integration of xarray with MetPy's calculations; the
# calculations currently convert the DataArrays to unit arrays that do not recognize which
# coordinates match with which. And so, we must do some manipulations.
#
# Fix 1 (xarray broadcasting):
#
# ::
#
# pressure, temperature = xr.broadcast(data['isobaric3'], data['temperature'])
# theta = mpcalc.potential_temperature(pressure, temperature)
#
# Fix 2 (unit array broadcasting):
#
# ::
#
# theta = mpcalc.potential_temperature(
# data['isobaric3'].metpy.unit_array[None, :, None, None],
# data['temperature'].metpy.unit_array
# )
#
| bsd-3-clause |
chris-b1/feather | integration-tests/test_roundtrips.py | 2 | 1400 | # Copyright 2016 Feather Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pandas.util.testing import assert_frame_equal
import pandas as pd
import feather
import util
def test_factor_rep():
fpath1 = util.random_path()
fpath2 = util.random_path()
rcode = """
library(feather)
iris <- read_feather("{0}")
iris$Species <- as.factor(as.character(iris$Species))
write_feather(iris, "{1}")
""".format(fpath1, fpath2)
tmp_paths = []
try:
iris = pd.read_csv('iris.csv')
levels = ['setosa', 'versicolor', 'virginica']
iris['Species'] = pd.Categorical(iris['Species'], categories=levels)
feather.write_dataframe(iris, fpath1)
util.run_rcode(rcode)
result = feather.read_dataframe(fpath2)
tmp_paths.extend([fpath1, fpath2])
assert_frame_equal(result, iris)
finally:
util.remove_paths(tmp_paths)
| apache-2.0 |
michaelhuang/QuantSoftwareToolkit | QSTK/qstkstudy/EventProfiler.py | 2 | 3086 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 16, 2013
@author: Sourabh Bajaj
@contact: sourabh@sourabhbajaj.com
@summary: EventProfiler
'''
import numpy as np
import matplotlib.pyplot as plt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.qsdateutil as du
def eventprofiler(df_events_arg, d_data, i_lookback=20, i_lookforward=20,
s_filename='study', b_market_neutral=True, b_errorbars=True,
s_market_sym='SPY'):
''' Event Profiler for an event matix'''
df_close = d_data['close'].copy()
df_rets = df_close.copy()
# Do not modify the original event dataframe.
df_events = df_events_arg.copy()
tsu.returnize0(df_rets.values)
if b_market_neutral == True:
df_rets = df_rets - df_rets[s_market_sym]
del df_rets[s_market_sym]
del df_events[s_market_sym]
df_close = df_close.reindex(columns=df_events.columns)
# Removing the starting and the end events
df_events.values[0:i_lookback, :] = np.NaN
df_events.values[-i_lookforward:, :] = np.NaN
# Number of events
i_no_events = int(np.logical_not(np.isnan(df_events.values)).sum())
assert i_no_events > 0, "Zero events in the event matrix"
na_event_rets = "False"
# Looking for the events and pushing them to a matrix
for i, s_sym in enumerate(df_events.columns):
for j, dt_date in enumerate(df_events.index):
if df_events[s_sym][dt_date] == 1:
na_ret = df_rets[s_sym][j - i_lookback:j + 1 + i_lookforward]
                if na_event_rets is None:
na_event_rets = na_ret
else:
na_event_rets = np.vstack((na_event_rets, na_ret))
if len(na_event_rets.shape) == 1:
na_event_rets = np.expand_dims(na_event_rets, axis=0)
# Computing daily rets and retuns
na_event_rets = np.cumprod(na_event_rets + 1, axis=1)
na_event_rets = (na_event_rets.T / na_event_rets[:, i_lookback]).T
# Study Params
na_mean = np.mean(na_event_rets, axis=0)
na_std = np.std(na_event_rets, axis=0)
li_time = range(-i_lookback, i_lookforward + 1)
# Plotting the chart
plt.clf()
plt.axhline(y=1.0, xmin=-i_lookback, xmax=i_lookforward, color='k')
if b_errorbars == True:
plt.errorbar(li_time[i_lookback:], na_mean[i_lookback:],
yerr=na_std[i_lookback:], ecolor='#AAAAFF',
alpha=0.7)
plt.plot(li_time, na_mean, linewidth=3, label='mean', color='b')
plt.xlim(-i_lookback - 1, i_lookforward + 1)
if b_market_neutral == True:
plt.title('Market Relative mean return of ' +\
str(i_no_events) + ' events')
else:
plt.title('Mean return of ' + str(i_no_events) + ' events')
plt.xlabel('Days')
plt.ylabel('Cumulative Returns')
plt.savefig(s_filename, format='pdf')
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
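# Illustrative sketch (a private helper, never called by scikit-learn itself):
# ties between equal scores are broken in favor of the lowest-indexed class,
# as the second row below shows.
def _inverse_binarize_multiclass_example():
    scores = np.array([[0.2, 0.5, 0.3],
                       [0.4, 0.4, 0.2]])
    return _inverse_binarize_multiclass(scores, classes=[0, 1, 2])  # -> array([1, 0])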
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
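# Illustrative usage sketch (a private helper, not part of the scikit-learn
# API): a round trip between label sets and the indicator matrix, plus
# label_binarize with a custom neg_label.
def _label_module_usage_sketch():
    mlb = MultiLabelBinarizer()
    indicator = mlb.fit_transform([(1, 2), (3,)])    # [[1, 1, 0], [0, 0, 1]]
    label_sets = mlb.inverse_transform(indicator)    # [(1, 2), (3,)]
    encoded = label_binarize([1, 6], classes=[1, 2, 4, 6], neg_label=-1)
    return label_sets, encoded                       # entries of encoded are -1 / +1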
| bsd-3-clause |
hlin117/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 50 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/backends/backend_mixed.py | 10 | 5577 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import six
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
figure: The figure instance.
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.get_dpi()
self._bbox_inches_restore = bbox_inches_restore
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
start_filter stop_filter draw_gouraud_triangle
draw_gouraud_triangles option_scale_image
_text2path _get_text_path_transform height width
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
# change the dpi of the figure temporarily.
self.figure.set_dpi(self.dpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore)
self._bbox_inches_restore = r
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
height = self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = np.frombuffer(buffer, dtype=np.uint8)
image = image.reshape((h, w, 4))
image = image[::-1]
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
float(l) / self.dpi * self._figdpi,
(float(height)-b-h) / self.dpi * self._figdpi,
image)
self._raster_renderer = None
self._rasterizing = False
# restore the figure dpi.
self.figure.set_dpi(self._figdpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._figdpi)
self._bbox_inches_restore = r
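def _mixed_mode_usage_sketch(figure, width, height, dpi, vector_renderer):
    """Illustrative sketch only -- not called by matplotlib itself.
    Shows the call sequence a backend would follow around a raster-heavy
    artist; the concrete ``vector_renderer`` is supplied by whichever vector
    backend owns this MixedModeRenderer.
    """
    renderer = MixedModeRenderer(figure, width, height, dpi, vector_renderer)
    renderer.start_rasterizing()   # subsequent draw calls go to an Agg buffer
    # ... draw complex artists here, e.g. a large quad mesh ...
    renderer.stop_rasterizing()    # the buffer is composited via draw_image
    return renderer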
| bsd-2-clause |
mkery/CS349-roads | Trip.py | 1 | 11056 | #This program takes in a filename and a set of features and creates a trip object.
import matplotlib.pyplot as pyplot
import numpy as np
import os
import sys
import math
import Pmf
from scipy.ndimage import gaussian_filter1d
#computes the norm of a vector (x,y)
def computeNorm(x, y):
return distance(x, y, 0, 0)
#computes the euclidean distance between two points (x0,y0) and (x1,y1)
def distance(x0, y0, x1, y1):
return math.sqrt((x1-x0)**2 + (y1-y0)**2)
#uses the inner product formula to compute the angle between two vectors
def computeAngle (p1, p2):
dot = 0
if computeNorm(p2[0], p2[1]) == 0 or computeNorm(p1[0], p1[1])==0: #if one vector is the zero vector, the dot product is 0
dot = 0
else:
dot = (p2[0]*p1[0]+p2[1]*p1[1])/float(computeNorm(p1[0], p1[1])*computeNorm(p2[0], p2[1]))
#fix floating point issues to use the acos function
if dot > 1:
dot = 1
elif dot < -1:
dot = -1
return math.acos(dot)*180/math.pi
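#worked example (illustrative only): perpendicular unit vectors are 90 degrees apart
def _computeAngle_example():
    return computeAngle((1, 0), (0, 1))   # -> 90.0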
#computes all stops along a given trip, returning them as a (start, end) tuple
def findStops(speeds):
stops = [] #stops are a start and end time pair
start = -1
end = -1
for i in range(1, len(speeds)):
advS = (speeds[i] + speeds[i-1])/2 #smooth out noise in stop duration
if speeds[i] == 0: #start of stop
end = i
if start == -1:
start = i
elif start > -1 and advS > 1:
stops.append([start,end])
start = -1
end = -1
if start > -1:
stops.append([start, len(speeds)])
return stops
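#worked example (illustrative only): in the speed trace below the car is stopped
#from second 2 through second 4, so findStops returns a single interval
def _findStops_example():
    return findStops([3, 1, 0, 0, 0, 2, 3])   # -> [[2, 4]]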
#returns a string representation of a given feature
def printHist_Feature(hist):
h = ""
for i in range(len(hist)-1):
h += str(hist[i])+","
#to avoid final comma (will mess up input)
h += str(hist[len(hist)-1])
return h
class Trip(object):
def __init__(self, filename, feat):
#read in trip from file
self.tripPathRaw = np.genfromtxt(filename, delimiter=',', skip_header=1)
self.numFeatures = 16
#expand feature string to have entries for all features
if len(feat) < self.numFeatures:
for i in range (len(feat), self.numFeatures):
feat+="0"
self.feat = feat
self.tripPath=self.tripPathRaw #used for testing path smoothing
#self.plotTrip()
self.dV = []
#initialize features
self.v = [] #speed at each second
self.acc = [] #acceleration at each second
self.v_a = [] #velocity*acceleration = horsepower/mass
self.jerk = [] #change of acceleration over a second
self.ang = [] #change of angle over 3 seconds
        self.ang_sp = [] #turning aggression = speed*angle
self.ang_or = [] #angle from the initial vector
self.low_sp_count = [] #time at low speed (below 0.25)
self.dist = [] #distance driven up to a particular point
self.bee_dist = [] #bee flight distance
self.turn_dist = [] #turning distance
self.turn_ang = [] #turning angle
self.sharp_turn_sp = []
#compute features
self.computeSpeedAcc()
self.computeTurningAngles()
self.computeTimeLowSpeeds()
self.findTurns()
#self.plotTrip()
#turn into histograms
self.computeHistograms()
#basic features
self.tripTime = self.tripPath.shape[0] #length of trip
self.advSpeed = self.tripDist/self.tripTime #meters per second
self.maxSpeed = max(self.v)
#self.stops = findStops(self.v)#len(findStops(self.v))
#self.speed_hist, self.acc = findSpeed_Hist(self.tripPath)
    #code adapted from Stack Overflow: http://stackoverflow.com/questions/15178146/line-smoothing-algorithm-in-python
#applies Gaussian smoothing to a given list of data
def smooth_data(self, data):
#print data
a = np.array(data)
x, y = a.T
t = np.linspace(0, 1, len(x))
t2 = np.linspace(0, 1, 100)
x2 = np.interp(t2, t, x)
y2 = np.interp(t2, t, y)
sigma = 0.5
x3 = gaussian_filter1d(x2, sigma)
y3 = gaussian_filter1d(y2, sigma)
        temp = [(x3[i],y3[i]) for i in range(len(x3))]
        temp = np.array(self.tripPath)
        #note: the smoothed points above are computed but discarded; the original
        #data is returned unchanged (smoothing is effectively disabled here)
        return data
#computes turning angle over 3s, turning speed*turning angle over 3s and change of heading
def computeTurningAngles (self):
#computes velocity vectors and change of heading
self.dV = []
for ind in range (1, len(self.tripPath)):
self.dV.append(((self.tripPath[ind][0]-self.tripPath[ind-1][0]),(self.tripPath[ind][1]-self.tripPath[ind-1][1])))
try:
self.ang_or.append(computeAngle((1,0), self.dV[ind-1]))
except ZeroDivisionError:
self.ang_or.append(0)
#computes data about turning within 3-second windows of time
for ind in range (2, len(self.dV)):
angle = computeAngle(self.dV[ind-2], self.dV[ind])
self.ang.append(angle)
self.ang_sp.append(angle*((self.v[ind-2]+self.v[ind-1]+self.v[ind]))/3)
#finds turns above 50 degrees, recording the total turning angle, turning distance and average turning speed*angle
def findTurns (self):
self.t1 = []
self.t2 = []
ind = 0 #starting time of turn
curr = 1 #will eventually store the end of each turn
th = 50 #threshold
while(ind<len(self.dV) and curr < len(self.dV)):
#print ind
#print "-", curr
#print computeAngle(self.dV[curr], self.dV[ind])
prev = 0
while(curr <len(self.dV)):
ang = computeAngle(self.dV[curr], self.dV[ind])
#if we are still turning
if ang > prev:
prev = ang
curr += 1
#if the turn has ended
else:
if prev > th:
self.turn_ang.append(prev)
self.turn_dist.append(self.dist[curr] - self.dist[ind])
self.sharp_turn_sp.append((self.dist[curr] - self.dist[ind])/(curr-ind+1))
#print str(ind) + " " + str(curr-1) + " " + str(prev) + " " +str(self.dist[curr]) +" " + str(self.dist[ind]) +" "+ str(self.dist[curr] - self.dist[ind])
#recorded for testing and plotting purposes
self.t1.append(self.tripPath[ind][0])
self.t2.append(self.tripPath[ind][1])
self.t1.append(self.tripPath[curr][0])
self.t2.append(self.tripPath[curr][1])
break #current turn is completed
#adjust the new starting point of turn
if curr == len(self.dV):
break
if ind == curr-1:
ind = ind+1
curr = ind+1
else:
ind = curr-1
#computes features related to speed, acceleration, and distances
def computeSpeedAcc(self):
#initialize
self.tripDist = 0
self.v.append(0)
self.acc.append(0)
self.v_a.append(0)
self.dist.append(0)
self.bee_dist.append(0)
#compute speed, acceleration, speed*acceleration, distance, bee-line distance, total trip distance
for i in range (1,len(self.tripPath)):
curr = distance(self.tripPath[i-1][0], self.tripPath[i-1][1], self.tripPath[i][0], self.tripPath[i][1])
self.tripDist += curr
self.v.append(curr)
self.acc.append(self.v[i]-self.v[i-1])
self.v_a.append(self.v[i]*self.acc[i])
self.dist.append(self.tripDist)
self.bee_dist.append(distance(self.tripPath[i][0], self.tripPath[i][1], 0, 0))
#smooth acceleration to compute jerk
temp = [(z, self.acc[z]) for z in range(len(self.acc))]
temp = self.smooth_data(temp)
temp = [temp[i][1] for i in range(len(temp))]
self.jerk = np.diff(temp)
#print self.dist
#computes the amount of time spent at low speed, below the 0.05, 0.1, 0.15, 0.2, and 0.25 percentiles
def computeTimeLowSpeeds (self):
self.low_sp_count = [0 for i in range(6)]
perc = [np.percentile (self.v, j*0.05) for j in range (6)]
for i in range(len(self.v)):
for j in range (6):
if (self.v[i]<perc[j]):
self.low_sp_count[j]+=1
break
#computes histograms for all features
def computeHistograms(self):
self.ang_hist = self.computeHist(self.ang)
self.ang_sp_hist = self.computeHist(self.ang_sp)
self.ang_or_hist = self.computeHist(self.ang_or)
self.speed_hist = self.computeHist(self.v)
self.acc_hist = self.computeHist(self.acc)
self.v_a_hist = self.computeHist(self.v_a)
self.jerk_hist = self.computeHist(self.jerk)
self.dist_hist = self.computeHist(self.dist)
self.bee_dist_hist = self.computeHist(self.bee_dist)
self.turn_ang_hist = self.computeHist(self.turn_ang)
self.turn_dist_hist = self.computeHist(self.turn_dist)
self.sharp_turn_sp_hist = self.computeHist(self.sharp_turn_sp)
#computes histogram for one feature, adding mean and standard deviation to the end of the list
def computeHist(self, data):
#inspired by Philipp Eulenberg from the Kaggle forum
bins = [5,10,25,50,75,85,90,95,97,98,99,100]
#if the data list is empty
if np.array(data).shape[0] == 0:
return [0 for i in range(len(bins)+2)]
hist = [np.percentile(data, bins[i]) for i in range(len(bins))]
mean = np.mean(data)
stdev = np.std(data)
hist.append(mean)
hist.append(stdev)
return hist
#creates a string representation of a trip using the specified features
def printFeatures(self):
allFeatures = [str(self.tripDist), str (self.advSpeed), str(self.maxSpeed), printHist_Feature(self.speed_hist), printHist_Feature(self.acc_hist), printHist_Feature(self.ang_hist), printHist_Feature(self.ang_sp_hist), printHist_Feature(self.v_a_hist), printHist_Feature(self.ang_or_hist), printHist_Feature(self.low_sp_count), printHist_Feature(self.jerk_hist), printHist_Feature(self.dist_hist), printHist_Feature(self.bee_dist_hist), printHist_Feature(self.turn_ang_hist), printHist_Feature(self.turn_dist_hist), printHist_Feature(self.sharp_turn_sp_hist)]
features = ""
for i in range(self.numFeatures):
if self.feat[i]=='1':
features += allFeatures[i] + ","
"""
features += str(self.tripDist)+","
features += str (self.advSpeed) + ","
features += str(self.maxSpeed) + ","
features += printHist_Feature(self.speed_hist)+"," #1
features += printHist_Feature(self.acc_hist) + "," #2
features += printHist_Feature(self.ang_hist) + "," #3
features += printHist_Feature(self.ang_sp_hist) + "," #4
features += printHist_Feature(self.v_a_hist) + "," #5
features += printHist_Feature(self.ang_or_hist) +"," #6
features += printHist_Feature(self.low_sp_count) + "," #7
features += printHist_Feature(self.jerk_hist) + "," #8
features += printHist_Feature(self.dist_hist) + "," #9
features += printHist_Feature(self.bee_dist_hist) + "," #10
features += printHist_Feature(self.turn_ang_hist) + "," #11
features += printHist_Feature(self.turn_dist_hist) +"," #12
features += printHist_Feature(self.sharp_turn_sp_hist) + "," #13
"""
return features[:-1] + "\n"
#plots trip, usually original path and path with modifications, used primarily for testing purposes
def plotTrip(self):
pyplot.figure(1)
pyplot.subplot(211)
startPoint = (self.tripPathRaw[0])
endPoint = (self.tripPathRaw[self.tripPathRaw.shape[0]-1])
pyplot.plot(self.tripPathRaw[:,0], self.tripPathRaw[:,1], 'r-', startPoint[0], startPoint[1], 'gD', endPoint[0], endPoint[1], 'bD', self.t1, self.t2, "kx")
pyplot.subplot(212)
startPoint = (self.tripPath[0])
endPoint = (self.tripPath[self.tripPath.shape[0]-1])
pyplot.plot(self.tripPath[:,0], self.tripPath[:,1], 'r-', startPoint[0], startPoint[1], 'gD', endPoint[0], endPoint[1], 'bD')
pyplot.show()
"""trip_test = Trip(sys.argv[1])
trip_test.plotTrip()
print trip_test.advSpeed"""
#t = Trip("../drivers/2/100.csv")
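#illustrative usage sketch (never executed on import): the csv path and the
#all-ones feature mask are placeholders; point the path at a real telematics
#file of x,y positions sampled once per second
def _trip_usage_sketch(filename="../drivers/1/1.csv"):
    trip = Trip(filename, "1" * 16)   # enable all 16 feature groups
    return trip.printFeatures()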
| mit |
anurag313/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
fabioticconi/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
yask123/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
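# Illustrative sketch (private helper, not part of the public API): typical use
# of the re-exported PCA class; the random matrix stands in for real features.
def _pca_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    return PCA(n_components=2).fit_transform(rng.rand(20, 5))  # shape (20, 2)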
| bsd-3-clause |
myuuuuun/various | ContinuousAlgorithm/HW3/HW3-1.py | 2 | 4385 | #-*- encoding: utf-8 -*-
"""
solve ordinary differential equations
Copyright (c) 2016 @myuuuuun
Released under the MIT license.
"""
import math
import numpy as np
import pandas as pd
import functools
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
EPSIRON = 1.0e-8
np.set_printoptions(precision=3)
np.set_printoptions(linewidth=400)
np.set_printoptions(threshold=np.nan)
pd.set_option('display.max_columns', 130)
pd.set_option('display.width', 1400)
plt.rcParams['font.size'] = 14
# Japanese font support (Osaka)
mpl.rcParams['font.family'] = 'Osaka'
# Explicit (forward) Euler method
def euler(func, init, t_start, step, repeat):
if not isinstance(func, list):
func = [func]
if not isinstance(init, list):
init = [init]
if len(init) != len(func):
raise ValueError("微分係数の数と初期値の数が一致しません")
dim = len(func)
path = np.zeros((dim+1, repeat), dtype=float)
path[:, 0] = [t_start] + init
for i in range(1, repeat):
current = path[1:, i-1]
path[0, i] = t_start + i * step
for s in range(dim):
path[s+1, i] = current[s] + func[s](current) * step
return path
# Modified Euler method (Heun's method)
def modified_euler(func, init, t_start, step, repeat):
if not isinstance(func, list):
func = [func]
if not isinstance(init, list):
init = [init]
if len(init) != len(func):
raise ValueError("微分係数の数と初期値の数が一致しません")
dim = len(func)
path = np.zeros((dim+1, repeat), dtype=float)
path[:, 0] = [t_start] + init
k1 = np.zeros(dim, dtype=float)
k2 = np.zeros(dim, dtype=float)
for i in range(1, repeat):
current = path[1:, i-1]
path[0, i] = t_start + i * step
# k1
for s in range(dim):
k1[s] = func[s](current)
# k2
for s in range(dim):
k2[s] = func[s](current + step * k1)
path[1:, i] = current + step * (k1 + k2) / 2
return path
# Explicit RK4 method (4-stage, 4th-order classical Runge-Kutta)
def runge_kutta(func, init, t_start, step, repeat):
if not isinstance(func, list):
func = [func]
if not isinstance(init, list):
init = [init]
if len(init) != len(func):
raise ValueError("微分係数の数と初期値の数が一致しません")
dim = len(func)
path = np.zeros((dim+1, repeat), dtype=float)
path[:, 0] = [t_start] + init
k1 = np.zeros(dim, dtype=float)
k2 = np.zeros(dim, dtype=float)
k3 = np.zeros(dim, dtype=float)
k4 = np.zeros(dim, dtype=float)
for i in range(1, repeat):
current = path[1:, i-1]
path[0, i] = t_start + i * step
# k1
for s in range(dim):
k1[s] = func[s](current)
# k2
for s in range(dim):
k2[s] = func[s](current + step * 0.5 * k1)
# k3
for s in range(dim):
k3[s] = func[s](current + step * 0.5 * k2)
# k4
for s in range(dim):
k4[s] = func[s](current + step * k3)
path[1:, i] = current + step * (k1 + 2*k2 + 2*k3 + k4) / 6
return path
if __name__ == '__main__':
"""
Sample: solve x''(t) = -x, x(0) = 1, x'(0) = 0
analytic solution is x(t) = cos(t)
"""
x = lambda array: array[1]
dx = lambda array: -1 * array[0]
func = [x, dx]
init = [1, 0]
t_start = 0
step = 0.01
repeat = 10000
ts = np.arange(t_start, step*repeat, step)
true_path = np.cos(ts)
euler_path = euler(func, init, t_start, step, repeat)
modified_euler_path = modified_euler(func, init, t_start, step, repeat)
rk4_path = runge_kutta(func, init, t_start, step, repeat)
fig, ax = plt.subplots(figsize=(16, 8))
    plt.title(r'Initial value problem $\ddot{x} = -x(t)$')
plt.xlabel("t")
plt.ylabel("x")
plt.plot(ts, true_path, color='orange', linewidth=3, label="true_path(x=cos(t))")
plt.plot(euler_path[0], euler_path[1], color='blue', linewidth=1, label="Euler approx")
plt.plot(modified_euler_path[0], modified_euler_path[1], color='green', linewidth=2, label="Modified Euler approx")
plt.plot(rk4_path[0], rk4_path[1], color='red', linewidth=1, label="RK4 approx")
plt.legend()
plt.show()
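    # extra check (illustrative): dx/dt = -x with x(0) = 1 has the exact
    # solution exp(-t), a convenient scalar test of the same solver interface
    decay = runge_kutta(lambda arr: -arr[0], [1.0], 0.0, 0.01, 1001)
    print("RK4 x({0:.2f}) = {1:.8f} (exact {2:.8f})".format(
        decay[0, -1], decay[1, -1], math.exp(-decay[0, -1])))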
| mit |
alexmoratalla/yambopy | yambopy/dbs/savedb.py | 2 | 16585 | # Copyright (c) 2018, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
import os
import numpy as np
from itertools import product
from netCDF4 import Dataset
from yambopy.plot.plotting import add_fig_kwargs
from yambopy.plot import *
from yambopy.tools.string import marquee
from yambopy.lattice import isbetween, car_red, red_car, rec_lat, vol_lat
from yambopy.units import ha2ev
max_exp = 50
atol = 1e-3
def vec_in_list(veca,vec_list):
"""
Check if a vector exists in a list of vectors
"""
return np.array([ np.allclose(veca,vecb,rtol=atol,atol=atol) for vecb in vec_list ]).any()
class YamboSaveDB():
"""
Reads the information from the SAVE database in Yambo
Arguments:
``save``: Path with the save folder (default:SAVE)
``filename``: name of the filename of the ns.db1 database created with yambo (default:ns.db1)
**Properties:**
``atomic_numbers`` : atomic number of the species
``eigenvalues`` : eigenvalues of the electrons in eV
``nkpoints`` : number of kpoints
"""
def __init__(self,atomic_numbers,car_atomic_positions,eigenvalues,sym_car,kpts_iku,
lat,alat,temperature,electrons,spin,time_rev):
self.atomic_numbers = atomic_numbers
self.car_atomic_positions = car_atomic_positions
self.eigenvalues = eigenvalues
self.sym_car = sym_car
self.kpts_iku = kpts_iku
self.lat = lat
self.alat = alat
self.temperature = temperature
self.electrons = electrons
self.spin = spin
self.time_rev = time_rev
#TODO: remove this
self.expanded = False
@classmethod
def from_db_file(cls,folder='.',filename='ns.db1'):
"""
Read the ns.db1 database
"""
path_filename = os.path.join(folder,filename)
if not os.path.isfile(path_filename):
raise FileNotFoundError( "Error reading %s database in YamboSaveDB"%path_filename )
with Dataset(path_filename) as database:
dimensions = database.variables['DIMENSIONS'][:]
natoms_a = database.variables['N_ATOMS'][:].astype(int).T
tmp_an = database.variables['atomic_numbers'][:].astype(int)
tmp_apos = database.variables['ATOM_POS'][:,:]
flatten = lambda l: [item for sublist in l for item in sublist]
atomic_numbers = flatten([[tmp_an[n]]*na for n,na in enumerate(natoms_a)])
atomic_positions = np.vstack([[tmp_apos[n,ia] for ia in range(na)] for n,na in enumerate(natoms_a) ])
args = dict( atomic_numbers = atomic_numbers,
car_atomic_positions = atomic_positions,
eigenvalues = database.variables['EIGENVALUES'][0,:]*ha2ev,
sym_car = database.variables['SYMMETRY'][:],
kpts_iku = database.variables['K-POINTS'][:].T,
lat = database.variables['LATTICE_VECTORS'][:].T,
alat = database.variables['LATTICE_PARAMETER'][:].T,
temperature = dimensions[13],
electrons = dimensions[14],
spin = int(dimensions[11]),
time_rev = dimensions[9] )
return cls(**args)
@property
def red_atomic_positions(self):
return car_red(self.car_atomic_positions,self.lat)
@property
def spin_degen(self):
"""spin degeneracy if 2 components degen 1 else degen 2"""
return [0,2,1][int(self.spin)]
@property
def min_eival(self):
return np.min(self.eigenvalues)
@property
def max_eival(self):
return np.max(self.eigenvalues)
@property
def car_kpoints(self):
"""convert form internal yambo units to cartesian lattice units"""
return np.array([ k/self.alat for k in self.kpts_iku ])
@property
def red_kpoints(self):
"""convert from cartesian coordinates to reduced coordinates"""
if not hasattr(self,"_red_kpoints"):
self._red_kpoints = car_red(self.car_kpoints,self.rlat)
return self._red_kpoints
@property
def rlat(self):
"""caclulate the reciprocal lattice"""
return rec_lat(self.lat)
@property
def rlat_vol(self):
return (2*np.pi)**3 * vol_lat(self.rlat)
@property
def lat_vol(self):
return vol_lat(self.lat)
@property
def natoms(self):
        return len(self.car_atomic_positions)
@property
def nbands(self):
_,nbands = self.eigenvalues.shape
return nbands
@property
def nkpoints(self):
return len(self.kpts_iku)
@property
def time_rev_list(self):
"""get a list of symmetries with time reversal"""
time_rev_list = [False]*self.nsym
for i in range(self.nsym):
time_rev_list[i] = ( i >= self.nsym/(self.time_rev+1) )
return time_rev_list
@property
def sym_rlu(self):
"""convert cartesian transformations to reduced transformations """
sym_rlu = np.zeros([self.nsym,3,3])
for n,s in enumerate(self.sym_car):
            a = np.dot(s.T,np.linalg.inv(self.rlat))
            sym_rlu[n] = np.dot(np.linalg.inv(self.lat.T),a)
return sym_rlu
@property
def nsym(self):
return len(self.sym_car)
@property
def sym_red(self):
"""Convert cartesian transformations to reduced transformations"""
if not hasattr(self,"_sym_red"):
sym_red = np.zeros([self.nsym,3,3],dtype=int)
for n,s in enumerate(self.sym_car):
sym_red[n] = np.round(np.dot(np.dot(self.lat,s.T),np.linalg.inv(self.lat)))
self._sym_red = sym_red
return self._sym_red
@property
def sym_rec_red(self):
"""Convert reduced transformations to reduced reciprocal transformations"""
if not hasattr(self,"_sym_rec_red"):
sym_rec_red = np.zeros([self.nsym,3,3],dtype=int)
for n,s in enumerate(self.sym_red):
sym_rec_red[n] = np.linalg.inv(s).T
self._sym_rec_red = sym_rec_red
return self._sym_rec_red
@property
def sym_rec(self):
"""Convert cartesian transformations to reciprocal transformations"""
sym_rec = np.zeros([self.nsym,3,3])
for n,s in enumerate(self.sym_car):
sym_rec[n] = np.linalg.inv(s).T
return sym_rec
@property
def efermi(self):
if not hasattr(self,"_efermi"):
            # break here??
            # I have changed get_efermi to get_fermi and now it works. To be checked by the author.
            self._efermi = self.get_fermi()
return self._efermi
def get_fermi(self,inv_smear=0.001,verbose=0):
""" Determine the fermi energy
"""
from scipy.optimize import bisect
kpts, nks, nss = self.expand_kpts()
def fermi(e):
""" fermi dirac function
"""
if e > max_exp:
return 0
elif e < -max_exp:
return 1
return 1/(np.exp(e)+1)
def fermi_array(e_array,ef):
""" Fermi dirac function for an array
"""
e_array = (e_array-ef)/inv_smear
return [ fermi(e) for e in e_array]
def occupation_minus_ne(ef):
""" The total occupation minus the total number of electrons
"""
return sum([sum(self.spin_degen*fermi_array(self.eigenvalues[nk],ef))*self.weights[nk] for nk in range(self.nkpoints)])-self.electrons
efermi = bisect(occupation_minus_ne,self.min_eival,self.max_eival)
if verbose: print("fermi: %lf eV"%efermi)
self.eigenvalues -= efermi
self.occupations = np.zeros([self.nkpoints,self.nbands],dtype=np.float32)
for nk in range(self.nkpoints):
self.occupations[nk] = fermi_array(self.eigenvalues[nk,:self.nbands],0)
return efermi
def write_kpoints(self,filename_full='kpts_full.dat',filename='kpts.dat'):
""" Write the kpoints in a file
"""
kpts, nks, nss = self.expand_kpts()
f = open(filename_full,'w')
for k in kpts:
f.write(("%12.8lf "*3)%tuple(k)+"\n")
f.close()
f = open(filename,'w')
for k in self.car_kpoints:
f.write(("%12.8lf "*3)%tuple(k)+"\n")
f.close()
def get_path(self,path,kpts=None,debug=False):
""" Obtain a list of indexes and kpoints that belong to the regular mesh
"""
if kpts is None:
kpts, nks, nss = self.expand_kpts()
else:
nks = list(range(len(kpts)))
#points in cartesian coordinates
path_car = red_car(path, self.rlat)
#find the points along the high symmetry lines
distance = 0
bands_kpoints = []
bands_indexes = []
#for all the paths
for k in range(len(path)-1):
# store here all the points in the path
# key: has the coordinates of the kpoint rounded to 4 decimal places
# value: index of the kpoint
# distance to the starting kpoint
# the kpoint cordinate
kpoints_in_path = {}
start_kpt = path_car[k] #start point of the path
end_kpt = path_car[k+1] #end point of the path
#generate repetitions of the brillouin zone
for x,y,z in product(list(range(-1,2)),list(range(-1,2)),list(range(1))):
#shift the brillouin zone
shift = red_car([np.array([x,y,z])],self.rlat)[0]
#iterate over all the kpoints
for index, kpt in zip(nks,kpts):
kpt_shift = kpt+shift #shift the kpoint
#if the point is collinear we add it
if isbetween(start_kpt,end_kpt,kpt_shift):
key = tuple([round(kpt,4) for kpt in kpt_shift])
value = [ index, np.linalg.norm(start_kpt-kpt_shift), kpt_shift ]
kpoints_in_path[key] = value
#sort the points acoording to distance to the start of the path
kpoints_in_path = sorted(list(kpoints_in_path.values()),key=lambda i: i[1])
#for all the kpoints in the path
for index, disp, kpt in kpoints_in_path:
bands_kpoints.append( kpt )
bands_indexes.append( index )
                if debug: print(("%12.8lf "*3)%tuple(kpt), index)
self.bands_kpoints = bands_kpoints
self.bands_indexes = bands_indexes
self.bands_highsym_qpts = path_car
return bands_kpoints, bands_indexes, path_car
def expand_kpts(self):
""" Take a list of qpoints and symmetry operations and return the full brillouin zone
with the corresponding index in the irreducible brillouin zone
"""
        #check if the kpoints were already expanded
if self.expanded == True: return self.kpoints_full, self.kpoints_indexes, self.symmetry_indexes
kpoints_indexes = []
kpoints_full = []
symmetry_indexes = []
#kpoints in the full brillouin zone organized per index
kpoints_full_i = {}
#expand using symmetries
for nk,k in enumerate(self.car_kpoints):
for ns,sym in enumerate(self.sym_car):
new_k = np.dot(sym,k)
#check if the point is inside the bounds
k_red = car_red([new_k],self.rlat)[0]
k_bz = (k_red+atol)%1
                #if the index is not in the dictionary, add a list
if nk not in kpoints_full_i:
kpoints_full_i[nk] = []
#if the vector is not in the list of this index add it
if not vec_in_list(k_bz,kpoints_full_i[nk]):
kpoints_full_i[nk].append(k_bz)
kpoints_full.append(new_k)
kpoints_indexes.append(nk)
symmetry_indexes.append(ns)
#calculate the weights of each of the kpoints in the irreducible brillouin zone
self.full_nkpoints = len(kpoints_full)
weights = np.zeros([self.nkpoints])
for nk in kpoints_full_i:
weights[nk] = float(len(kpoints_full_i[nk]))/self.full_nkpoints
#set the variables
self.expanded = True
self.weights = np.array(weights)
self.kpoints_full = np.array(kpoints_full)
self.kpoints_indexes = np.array(kpoints_indexes)
self.symmetry_indexes = np.array(symmetry_indexes)
print("%d kpoints expanded to %d"%(len(self.car_kpoints),len(kpoints_full)))
return self.kpoints_full, self.kpoints_indexes, self.symmetry_indexes
def plot_bs_ax(self,ax,path,bandmin=None,bandmax=None,add_indexes=False,**kwargs):
"""
        Plot this bandstructure on a Matplotlib ax
"""
bands_kpoints, bands_indexes, bands_highsym_qpts = self.get_path(path)
self.get_fermi()
#calculate distances
bands_distances = [0]
distance = 0
for nk in range(1,len(bands_kpoints)):
distance += np.linalg.norm(bands_kpoints[nk]-bands_kpoints[nk-1])
bands_distances.append(distance)
        #plot high-symmetry qpoints
distance = 0
bands_highsym_qpts_distances = [0]
for nk in range(1,len(bands_highsym_qpts)):
ax.axvline(distance,color='k')
distance += np.linalg.norm(bands_highsym_qpts[nk]-bands_highsym_qpts[nk-1])
bands_highsym_qpts_distances.append(distance)
ax.axvline(distance,color='k')
#plot bands
color = kwargs.pop('c','red')
ax.plot(bands_distances,self.eigenvalues[bands_indexes,bandmin:bandmax],c=color,**kwargs)
ax.set_xlim(0,max(bands_distances))
if add_indexes:
ax.set_xticks(bands_distances)
ax.set_xticklabels(np.array(bands_indexes)+1)
for d in bands_distances:
ax.axvline(d,color='k',alpha=0.5)
return ax
@add_fig_kwargs
def plot_bs(self,path,**kwargs):
""" Plot the difference in energies of two bands
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
self.plot_bs_ax(ax,path,**kwargs)
return fig
def plot_bs_bz(self,size=20,bandc=1,bandv=None,expand=True,repx=list(range(3)),repy=list(range(3)),repz=list(range(3))):
""" Plot the difference in energies of two bands
"""
if bandv is None: bandv = self.nbandsv
cmap = plt.get_cmap("viridis")
eigenvalues = self.eigenvalues
print("tansitions %d -> %d"%(bandv,bandc))
weights = (eigenvalues[:,bandc-1]-eigenvalues[:,bandv-1])
print("min:", min(weights))
print("max:", max(weights))
weights = weights/max(weights)
if expand:
kpts, nks = self.expand_kpts(repx=repx,repy=repy,repz=repz)
weights = weights[nks]
else:
kpts = self.car_kpoints
fig = plt.figure(figsize=(10,10))
plt.scatter(kpts[:,0], kpts[:,1], s=size, marker='H', cmap=cmap, lw=0, c=weights)
ax = plt.axes()
ax.set_aspect('equal')
plt.show()
def __str__(self):
lines = []; app = lines.append
app(marquee(self.__class__.__name__))
app("reciprocal lattice:")
app("\n".join([("%12.8lf "*3)%tuple(r) for r in self.rlat]))
app("lattice:")
app("\n".join([("%12.8lf "*3)%tuple(r) for r in self.lat]))
app("alat:")
app(("%12.8lf "*3)%tuple(self.alat))
app("atom positions:")
for an, pos in zip(self.atomic_numbers, self.red_atomic_positions):
app( "%3d " % an + ("%12.8lf " * 3) % tuple(pos) )
app("nkpoints: %d"%self.nkpoints)
app("symmetry operations: %d\n"%len(self.sym_car))
app("temperature : %lf"%self.temperature)
app("electrons : %lf"%self.electrons)
return "\n".join(lines)
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
ryokbys/nap | nappy/manipulate.py | 1 | 9759 | #!/usr/bin/env python
"""
Manipulation functions.
Usage:
manipulate.py [options]
Options:
-h, --help Show this message and exit.
"""
from __future__ import print_function
from docopt import docopt
import numpy as np
import copy
import nappy
__author__ = "RYO KOBAYASHI"
__version__ = "200424"
def substitute(nsys0,spc1,spc2,num=1):
"""
Substitute an atom of SPC1 in the system NSYS with SPC2.
Parameters
----------
nsys0 : NAPSystem object
System in which atoms are to be replaced. (not inplace)
spc1 : integer or string
An atom ID or atom species to be replaced.
If it is integer, the atom specified by SPC1 is replaced.
If it is string, NUM atoms of species SPC1 are to be replaced with atoms of SPC2.
    spc2 : integer or string
        If an integer, the ID of the atom whose species is swapped with that of SPC1
        (available only when SPC1 is also an integer).
        If a string, the atom species that replaces atoms of SPC1.
num : integer, optional
Number of atoms to be replaced.
Returns
-------
nsys_out : NAPSystem object
Result of the system in which atoms are substituted.
"""
if type(spc1) not in (int,str):
raise TypeError('SPC1 must be either int or str.')
if type(spc2) not in (int,str):
raise TypeError('SPC2 must be either int or str.')
if type(spc2) is int and type(spc1) is not int:
        raise ValueError('SPC2 must be a string if SPC1 is a string.')
if not isinstance(nsys0,nappy.napsys.NAPSystem):
raise ValueError('NSYS0 is not an instance of NAPSystem.')
import copy
nsys = copy.deepcopy(nsys0)
if type(spc1) is int:
if spc1 > nsys.num_atoms():
raise ValueError('An integer SPC1 is greater than num of atoms in NSYS0')
sid1 = nsys.atoms.sid[spc1]
if type(spc2) is int:
sid2 = nsys.atoms.sid[spc2]
if sid1 == sid2:
# do nothing
return nsys
else:
nsys.atoms.at[spc1,'sid'] = sid2
nsys.atoms.at[spc2,'sid'] = sid1
return nsys
else:
if spc2 not in nsys.specorder:
nsys.specorder.append(spc2)
sid2 = nsys.specorder.index(spc2) +1
if sid1 == sid2:
# do nothing
return nsys
else:
nsys.atoms.at[spc1,'sid'] = sid2
return nsys
else: # type(spc1) is str
import random
if spc1 not in nsys.specorder:
raise ValueError('The species {0:s} is not in the system.'.format(spc1))
sid1 = nsys.specorder.index(spc1) +1
if spc2 not in nsys.specorder:
nsys.specorder.append(spc2)
sid2 = nsys.specorder.index(spc2) +1
indices = nsys.atoms[nsys.atoms.sid == sid1].index.tolist()
for n in range(num):
if len(indices) == 0:
break
idx = random.choice(indices)
nsys.atoms.at[idx,'sid'] = sid2
indices.remove(idx)
return nsys
raise RuntimeError('Something is wrong.')
def insert(nsys0,num=1,spc=None,rule='maxmindist',num_trial=100):
"""
    Add atoms into the system at reasonable positions.
    Look for reasonable positions via a Monte-Carlo search with Levy-flight moves.
Parameters
----------
nsys0 : NAPSystem object
The system into which atoms are inserted (not inplace).
num : integer
Number of atoms to be inserted.
rule : str
The rule to be applied when placing atoms into the system.
- maxmindist: maximize min distance
num_trial : integer
Number of trials of insertion positions.
Returns
-------
nsys : NAPSystem object
The system in which atoms are inserted.
"""
if not isinstance(nsys0,nappy.napsys.NAPSystem):
raise ValueError('NSYS0 must be an instance of NAPSystem.')
if type(spc) is not str:
raise ValueError('SPC must be string.')
if num < 1:
raise ValueError('NUM must be greater than 0.')
if num_trial < 1:
raise ValueError('NUM_TRIAL must be greater than 0.')
import copy
#...Levy flight, see RK's memo about Cuckoo search
from scipy.special import gamma
beta = 1.5
betai = 1.0/beta
vsgm = 1.0
usgm = (gamma(1+beta)*np.sin(np.pi*beta/2)/ \
gamma((1+beta)/2)*beta*2.0**((beta-1)/2))**betai
nsys = copy.deepcopy(nsys0)
if spc not in nsys.specorder:
nsys.specorder.append(spc)
sid = nsys.specorder.index(spc) +1
hmat = nsys.get_hmat()
for n in range(num):
pi = np.random.rand(3)
pi_prev = copy.copy(pi)
d2min_prev = 0.0
for it in range(num_trial):
#...Move the position using Levy flight
for ixyz in range(3):
p = pi_prev[ixyz]
u = np.random.normal() *usgm
v = max( abs(np.random.normal()*vsgm), 1.0e-8 )
w = u /v**betai
zeta = 0.01 *w
p += zeta*np.random.normal()
if p < 0.0:
p += 1.0
elif p >= 1.0:
p -= 1.0
pi[ixyz] = p
#...Calc minimum distance
d2min = 1.0e+30
for j in range(len(nsys.atoms)):
pj = nsys.atoms.pos[j]
xij = pj -pi
xij = xij -np.round(xij)
rij = np.dot(hmat,xij)
dij2 = rij[0]**2 +rij[1]**2 +rij[2]**2
d2min = min(d2min,dij2)
#...Decide whether or not to employ the new position
# print('pi=',pi,', d2min,d2min_prev=',d2min,d2min_prev)
if d2min > d2min_prev:
pi_prev = copy.copy(pi)
d2min_prev = d2min
#...Insert the atom at the position
symbols = [spc]
poss = [pi_prev]
vels = [[0., 0., 0.]]
frcs = [[0., 0., 0.]]
# print('Atom added to {0:6.3f} {1:6.3f} {2:6.3f}, dist={3:.3f}'.format(*pi_prev,np.sqrt(d2min)))
nsys.add_atoms(symbols,poss,vels,frcs)
return nsys
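#...A minimal usage sketch for insert() (the I/O helper names below are
#   assumptions for illustration, not part of this module):
#
#     nsys0 = nappy.io.read('POSCAR')                  # assumed reader
#     nsys1 = insert(nsys0, num=2, spc='Li', num_trial=200)
#     nappy.io.write(nsys1, fname='POSCAR_Li')         # assumed writer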
def replicate(nsys0,n1o,n2o,n3o,n1m=0,n2m=0,n3m=0):
"""
Return the system multiplied by n1o,n2o,n3o.
"""
    import copy
    import pandas as pd
    nsys = copy.deepcopy(nsys0)
#...Convert to int
n1 = int(n1o)
n2 = int(n2o)
n3 = int(n3o)
if n1 == 0: n1 = 1
if n2 == 0: n2 = 1
if n3 == 0: n3 = 1
if n1 == n2 == n3 == 1:
return None
#...unit vectors to be repeated
m1 = n1-n1m
m2 = n2-n2m
m3 = n3-n3m
nsys.a1= nsys.a1*m1
nsys.a2= nsys.a2*m2
nsys.a3= nsys.a3*m3
#n123= m1*m2*m3
maxsid = nsys.atoms.sid.max()
#natm0= nsys.num_atoms()
# atoms0= copy.copy(nsys.atoms)
newnatm = len(nsys.atoms) *m1*m2*m3
newsids = [ 0 for i in range(newnatm) ]
newposs = [ np.zeros(3) for i in range(newnatm) ]
newvels = [ np.zeros(3) for i in range(newnatm) ]
newfrcs = [ np.zeros(3) for i in range(newnatm) ]
colnames = list(nsys.atoms.columns)
#...Labels except (sid,pos,vel,frc) are all auxiliary data
auxnames = colnames.copy()
auxnames.remove('sid')
auxnames.remove('pos')
auxnames.remove('vel')
auxnames.remove('frc')
newauxs = {}
for auxname in auxnames:
newauxs[auxname] = []
inc = 0
poss = nsys.atoms.pos
for i1 in range(n1m,n1):
for i2 in range(n2m,n2):
for i3 in range(n3m,n3):
for i0 in range(len(nsys.atoms)):
pi0 = poss[i0]
x= pi0[0]/m1 +1.0/m1*i1
y= pi0[1]/m2 +1.0/m2*i2
z= pi0[2]/m3 +1.0/m3*i3
newsids[inc] = nsys.atoms.sid[i0]
newposs[inc][:] = [x,y,z]
newvels[inc][:] = nsys.atoms.vel[i0]
newfrcs[inc][:] = nsys.atoms.frc[i0]
for auxname in auxnames:
newauxs[auxname].append(nsys.atoms.loc[i0,auxname])
inc += 1
#...Use DataFrame nsys.atoms
nsys.atoms = pd.DataFrame(columns=colnames)
nsys.atoms['pos'] = newposs
nsys.atoms['vel'] = newvels
nsys.atoms['frc'] = newfrcs
nsys.atoms['sid'] = newsids
for auxname in auxnames:
nsys.atoms[auxname] = newauxs[auxname]
return nsys
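#...Example (sketch): build a 2x2x2 supercell; note that replicate() returns
#   None when n1 = n2 = n3 = 1, i.e. when no replication is requested.
#
#     nsys222 = replicate(nsys, 2, 2, 2)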
def change_cell(nsys0,X0):
"""
Change the cell to the new one whose lattice vectors are given by,
(anew,bnew,cnew) = (a,b,c)*X0
where a,b,c are column vectors that consist the original cell.
And the atoms are reduced to those within the new cell.
X is 3x3 matrix whose components are integer.
"""
if X0.dtype != int:
raise TypeError('X0.dtype is wrong.')
if X0.shape != (3,3):
raise TypeError('X0 has wrong shape.')
X = np.array(X0,dtype=float)
ncp = np.zeros(3,dtype=int)
ncp[0] = X0[0,:].max()
ncp[1] = X0[1,:].max()
ncp[2] = X0[2,:].max()
nsys = replicate(nsys0,ncp[0],ncp[1],ncp[2])
hmat0 = nsys0.get_hmat()
hmat = np.dot(hmat0,X0)
sposs = nsys.get_scaled_positions()
spnews = np.array(sposs)
nsys.set_hmat(hmat)
#...Since hmat0 is that of extended system,
#...X should correspond to it.
X[0,:] /= ncp[0]
X[1,:] /= ncp[1]
X[2,:] /= ncp[2]
Xi = np.linalg.inv(X)
for i,p in enumerate(sposs):
pnew = np.dot(Xi,p)
for l in range(3):
pnew[l] = nappy.util.pbc(pnew[l])
spnews[i,:] = pnew[:]
nsys.set_scaled_positions(spnews)
return nsys
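#...Example (sketch): double the cell along the first lattice vector using an
#   integer transformation matrix, so that (anew,bnew,cnew) = (a,b,c)*X0.
#
#     X0 = np.array([[2, 0, 0],
#                    [0, 1, 0],
#                    [0, 0, 1]], dtype=int)
#     nsys2 = change_cell(nsys, X0)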
if __name__ == "__main__":
args = docopt(__doc__)
print('manipulate.py does nothing...')
| mit |
boyuhou/excel-scanner | iqfeed/tools.py | 1 | 2757 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from functools import wraps
import logging
import time
import pandas as pd
log = logging.getLogger(__name__)
def retry(tries, exceptions=None, delay=0):
"""
Decorator for retrying a function if exception occurs
Source: https://gist.github.com/treo/728327
tries -- num tries
exceptions -- exceptions to catch
delay -- wait between retries
"""
exceptions = exceptions or (Exception, )
def _retry(fn):
@wraps(fn)
def __retry(*args, **kwargs):
for _ in range(tries+1):
try:
return fn(*args, **kwargs)
except exceptions as e:
log.warning("Exception, retrying...", exc_info=e)
time.sleep(delay)
raise # If no success after tries raise last exception
return __retry
return _retry
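# Example usage (sketch; the function body and exception type are illustrative):
#
#   @retry(3, exceptions=(IOError,), delay=1)
#   def request_history(symbol):
#       ...  # network call that may fail transiently and is worth retrying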
def bars_to_dateframe(bars, tz):
"""Creates dataframe from list of Bar instances"""
rows = [{'DateTime': bar.datetime,
'Open': bar.open,
'High': bar.high,
'Low': bar.low,
'Close': bar.close,
'Volume': bar.volume,
} for bar in bars]
return pd.DataFrame.from_records(rows).set_index(['DateTime']).sort_index()
def tick_bars_to_dateframe(bars):
rows = [{
'DateTime': bar.datetime,
'Last': bar.last,
'LastSize': bar.last_size,
'Volume': bar.volume,
'Bid': bar.bid,
'Ask': bar.ask,
'TicketID': bar.ticket_id,
} for bar in bars]
return pd.DataFrame.from_records(rows).set_index(['DateTime']).sort_index()
def get_instruments_from_file(filename):
"""Load index from txt file"""
instruments = []
with open(filename, 'r') as f:
for instrument in f:
instruments.append(instrument.rstrip())
if len(instruments) > 0:
instruments = instruments[1:]
return instruments
| apache-2.0 |
zhongyx12/Faster-RCNN-Refinement | faster_rcnn/roi_data_layer/minibatch2.py | 5 | 13350 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
from ..utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
if cfg.IS_MULTISCALE:
im_blob, im_scales = _get_image_blob_multiscale(roidb)
else:
# Get the input image blob, formatted for caffe
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES_BASE), size=num_images)
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
else:
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights, sublabels \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image, num_classes)
# Add to RoIs blob
if cfg.IS_MULTISCALE:
if cfg.IS_EXTRAPOLATING:
rois, levels = _project_im_rois_multiscale(im_rois, cfg.TRAIN.SCALES)
batch_ind = im_i * len(cfg.TRAIN.SCALES) + levels
else:
rois, levels = _project_im_rois_multiscale(im_rois, cfg.TRAIN.SCALES_BASE)
batch_ind = im_i * len(cfg.TRAIN.SCALES_BASE) + levels
else:
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps, sublabels_blob, view_targets_blob, view_inside_blob)
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps, sublabels_blob)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
    rois = roidb['boxes']
    # Per-RoI subcategory labels; the roidb field name below is an assumption
    # (precomputed alongside 'max_classes'), with a zero fallback when absent.
    sublabels = roidb['max_subclasses'] if 'max_subclasses' in roidb \
        else np.zeros_like(labels)
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = []
for i in xrange(1, num_classes):
fg_inds.extend(np.where((labels == i) & (overlaps >= cfg.TRAIN.FG_THRESH))[0])
fg_inds = np.array(fg_inds)
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image,
replace=False)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = []
for i in xrange(1, num_classes):
bg_inds.extend( np.where((labels == i) & (overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0] )
if len(bg_inds) < bg_rois_per_this_image:
for i in xrange(1, num_classes):
bg_inds.extend( np.where((labels == i) & (overlaps < cfg.TRAIN.BG_THRESH_HI))[0] )
if len(bg_inds) < bg_rois_per_this_image:
bg_inds.extend( np.where(overlaps < cfg.TRAIN.BG_THRESH_HI)[0] )
bg_inds = np.array(bg_inds, dtype=np.int32)
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image,
replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds).astype(int)
# print '{} foregrounds and {} backgrounds'.format(fg_inds.size, bg_inds.size)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
sublabels = sublabels[keep_inds]
sublabels[fg_rois_per_this_image:] = 0
bbox_targets, bbox_loss_weights = \
_get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :],
num_classes)
    if cfg.TRAIN.VIEWPOINT or cfg.TEST.VIEWPOINT:
        # Per-RoI viewpoint annotations; the roidb field name is an assumption.
        viewpoints = roidb['gt_viewpoints'][keep_inds]
view_targets, view_loss_weights = \
_get_viewpoint_estimation_labels(viewpoints, labels, num_classes)
return labels, overlaps, rois, bbox_targets, bbox_loss_weights, sublabels, view_targets, view_loss_weights
return labels, overlaps, rois, bbox_targets, bbox_loss_weights, sublabels
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_scale = cfg.TRAIN.SCALES_BASE[scale_inds[i]]
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_image_blob_multiscale(roidb):
"""Builds an input blob from the images in the roidb at multiscales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
scales = cfg.TRAIN.SCALES_BASE
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
for im_scale in scales:
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _project_im_rois_multiscale(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
scales = np.array(scales)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
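# Illustrative note: with several scales each RoI is assigned the pyramid level
# whose scale s makes its scaled area closest to 224*224, i.e.
# level = argmin_s |area * s**2 - 224**2|; with a single scale all RoIs use level 0.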
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
        bbox_targets (ndarray): N x 4K blob of regression targets
bbox_loss_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.]
return bbox_targets, bbox_loss_weights
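# Worked example (sketch): with num_classes = 3, a RoI whose compact target row
# is [2, tx, ty, tw, th] (class 2) expands to
#   bbox_targets      = [0,0,0,0,  0,0,0,0,  tx,ty,tw,th]
#   bbox_loss_weights = [0,0,0,0,  0,0,0,0,  1, 1, 1, 1]
# so only the ground-truth class block contributes to the regression loss.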
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
view_target_data (ndarray): N x 3K blob of regression targets
view_loss_weights (ndarray): N x 3K blob of loss weights
"""
view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
inds = np.where( (clss > 0) & np.isfinite(viewpoint_data[:,0]) & np.isfinite(viewpoint_data[:,1]) & np.isfinite(viewpoint_data[:,2]) )[0]
for ind in inds:
cls = clss[ind]
start = 3 * cls
end = start + 3
view_targets[ind, start:end] = viewpoint_data[ind, :]
view_loss_weights[ind, start:end] = [1., 1., 1.]
assert not np.isinf(view_targets).any(), 'viewpoint undefined'
return view_targets, view_loss_weights
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps, sublabels_blob, view_targets_blob=None, view_inside_blob=None):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
import math
for i in xrange(min(rois_blob.shape[0], 10)):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
subcls = sublabels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' subclass: ', subcls, ' overlap: ', overlaps[i]
start = 3 * cls
end = start + 3
# print 'view: ', view_targets_blob[i, start:end] * 180 / math.pi
# print 'view weights: ', view_inside_blob[i, start:end]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
wasit7/book_pae | pae/forcast/src/convert_allsub_tojson.py | 2 | 1651 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 23:20:29 2016
@author: Methinee
"""
import pandas as pd
import json
from collections import defaultdict
countEachSubSort = 0
key_sub_sort = defaultdict(list)
subjects = []
countSub = 0
node = []
link= []
out={}
sources=[]
targets=[]
df_file = pd.read_csv('../data/df_dropSub_less20.csv',delimiter=",", skip_blank_lines = True,
error_bad_lines=False,encoding='utf8')
headers=list(df_file.columns.values)
for sub in df_file['3COURSEID']:
if sub not in subjects:
subjects.append(sub)
# print "%s, index is %d"%(sub,subjects.index(sub))
countSub = countSub+1
node.append({"name":sub})
subjects.remove('CS231')
node.remove({"name":'CS231'})
subjects.sort()
node.sort()
# Find index of source and target from book/graph1.gv
df_st = pd.read_csv('../data/source-target_predict.csv',delimiter=";", skip_blank_lines = True,
error_bad_lines=False)
headers_st=list(df_st.columns.values)
df_st = df_st.dropna()
for source in df_st[headers_st[0]]:
#print "source is %s, index is %d"%(source,subjects_db.index(source))
sources.append(subjects.index(source))
for target in df_st[headers_st[1]]:
#print "target is %s, index is %d"%(target,subjects_db.index(target))
targets.append(subjects.index(target))
for i in xrange(0,62): # build 62 of the source-target links (the full Bachelor graph has 70)
link.append({"source":sources[i],"target":targets[i],"type": "licensing"})
out["node"]=node
out["link"]=link
#with open("subjects_111.json","w") as outfile:
# json.dump(out,outfile,sort_keys=True, indent=4, separators=(',',': '))
| mit |
MechCoder/sympy | sympy/physics/quantum/tensorproduct.py | 64 | 13572 | """Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import u, range
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
_combined_printing = False
def combined_tensor_printing(combined):
"""Set flag controlling whether tensor products of states should be
printed as a combined bra/ket or as an explicit tensor product of different
bra/kets. This is a global setting for all TensorProduct class instances.
Parameters
----------
    combined : bool
When true, tensor product states are combined into one ket/bra, and
when false explicit tensor product notation is used between each
ket/bra.
"""
global _combined_printing
_combined_printing = combined
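# For example (sketch), for TensorProduct(Ket('a'), Ket('b')):
#
#   combined_tensor_printing(True)   # pretty/latex printers show a combined |a,b>
#   combined_tensor_printing(False)  # the default: an explicit |a> x |b> product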
class TensorProduct(Expr):
"""The tensor product of two or more arguments.
For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
or tensor product matrix. For other objects a symbolic ``TensorProduct``
instance is returned. The tensor product is a non-commutative
multiplication that is used primarily with operators and states in quantum
mechanics.
Currently, the tensor product distinguishes between commutative and non-
commutative arguments. Commutative arguments are assumed to be scalars and
are pulled out in front of the ``TensorProduct``. Non-commutative arguments
remain in the resulting ``TensorProduct``.
Parameters
==========
args : tuple
A sequence of the objects to take the tensor product of.
Examples
========
Start with a simple tensor product of sympy matrices::
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum import TensorProduct
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> TensorProduct(m1, m2)
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2],
[3, 0, 4, 0],
[0, 3, 0, 4]])
>>> TensorProduct(m2, m1)
Matrix([
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 1, 2],
[0, 0, 3, 4]])
We can also construct tensor products of non-commutative symbols:
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> tp = TensorProduct(A, B)
>>> tp
AxB
We can take the dagger of a tensor product (note the order does NOT reverse
like the dagger of a normal product):
>>> from sympy.physics.quantum import Dagger
>>> Dagger(tp)
Dagger(A)xDagger(B)
Expand can be used to distribute a tensor product across addition:
>>> C = Symbol('C',commutative=False)
>>> tp = TensorProduct(A+B,C)
>>> tp
(A + B)xC
>>> tp.expand(tensorproduct=True)
AxC + BxC
"""
is_commutative = False
def __new__(cls, *args):
if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
return matrix_tensor_product(*args)
c_part, new_args = cls.flatten(sympify(args))
c_part = Mul(*c_part)
if len(new_args) == 0:
return c_part
elif len(new_args) == 1:
return c_part * new_args[0]
else:
tp = Expr.__new__(cls, *new_args)
return c_part * tp
@classmethod
def flatten(cls, args):
# TODO: disallow nested TensorProducts.
c_part = []
nc_parts = []
for arg in args:
cp, ncp = arg.args_cnc()
c_part.extend(list(cp))
nc_parts.append(Mul._from_args(ncp))
return c_part, nc_parts
def _eval_adjoint(self):
return TensorProduct(*[Dagger(i) for i in self.args])
def _eval_rewrite(self, pattern, rule, **hints):
sargs = self.args
terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
return TensorProduct(*terms).expand(tensorproduct=True)
def _sympystr(self, printer, *args):
from sympy.printing.str import sstr
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + '('
s = s + sstr(self.args[i])
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + ')'
if i != length - 1:
s = s + 'x'
return s
def _pretty(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print('', *args)
length_i = len(self.args[i].args)
for j in range(length_i):
part_pform = printer._print(self.args[i].args[j], *args)
next_pform = prettyForm(*next_pform.right(part_pform))
if j != length_i - 1:
next_pform = prettyForm(*next_pform.right(', '))
if len(self.args[i].args) > 1:
next_pform = prettyForm(
*next_pform.parens(left='{', right='}'))
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
pform = prettyForm(*pform.right(',' + ' '))
pform = prettyForm(*pform.left(self.args[0].lbracket))
pform = prettyForm(*pform.right(self.args[0].rbracket))
return pform
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (Add, Mul)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(u('\N{N-ARY CIRCLED TIMES OPERATOR}') + u(' ')))
else:
pform = prettyForm(*pform.right('x' + ' '))
return pform
def _latex(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
def _label_wrap(label, nlabels):
return label if nlabels == 1 else r"\left\{%s\right\}" % label
s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
len(arg.args)) for arg in self.args])
return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
self.args[0].rbracket_latex)
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\left('
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
s = s + '{' + printer._print(self.args[i], *args) + '}'
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\right)'
if i != length - 1:
s = s + '\\otimes '
return s
def doit(self, **hints):
return TensorProduct(*[item.doit(**hints) for item in self.args])
def _eval_expand_tensorproduct(self, **hints):
"""Distribute TensorProducts across addition."""
args = self.args
add_args = []
stop = False
for i in range(len(args)):
if isinstance(args[i], Add):
for aa in args[i].args:
tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
if isinstance(tp, TensorProduct):
tp = tp._eval_expand_tensorproduct()
add_args.append(tp)
break
if add_args:
return Add(*add_args)
else:
return self
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', None)
exp = tensor_product_simp(self)
if indices is None or len(indices) == 0:
return Mul(*[Tr(arg).doit() for arg in exp.args])
else:
return Mul(*[Tr(value).doit() if idx in indices else value
for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
"""Simplify a Mul with TensorProducts.
    Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
to a ``TensorProduct`` of ``Muls``. It currently only works for relatively
simple cases where the initial ``Mul`` only has scalars and raw
``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
``TensorProduct``s.
Parameters
==========
e : Expr
A ``Mul`` of ``TensorProduct``s to be simplified.
Returns
=======
e : Expr
A ``TensorProduct`` of ``Mul``s.
Examples
========
This is an example of the type of simplification that this function
performs::
>>> from sympy.physics.quantum.tensorproduct import \
tensor_product_simp_Mul, TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp_Mul(e)
(A*C)x(B*D)
"""
# TODO: This won't work with Muls that have other composites of
# TensorProducts, like an Add, Pow, Commutator, etc.
# TODO: This only works for the equivalent of single Qbit gates.
if not isinstance(e, Mul):
return e
c_part, nc_part = e.args_cnc()
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return e
elif e.has(TensorProduct):
current = nc_part[0]
if not isinstance(current, TensorProduct):
raise TypeError('TensorProduct expected, got: %r' % current)
n_terms = len(current.args)
new_args = list(current.args)
for next in nc_part[1:]:
# TODO: check the hilbert spaces of next and current here.
if isinstance(next, TensorProduct):
if n_terms != len(next.args):
raise QuantumError(
'TensorProducts of different lengths: %r and %r' %
(current, next)
)
for i in range(len(new_args)):
new_args[i] = new_args[i] * next.args[i]
else:
# this won't quite work as we don't want next in the
# TensorProduct
for i in range(len(new_args)):
new_args[i] = new_args[i] * next
current = next
return Mul(*c_part) * TensorProduct(*new_args)
else:
return e
def tensor_product_simp(e, **hints):
"""Try to simplify and combine TensorProducts.
In general this will try to pull expressions inside of ``TensorProducts``.
It currently only works for relatively simple cases where the products have
only scalars, raw ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators``
of ``TensorProducts``. It is best to see what it does by showing examples.
Examples
========
>>> from sympy.physics.quantum import tensor_product_simp
>>> from sympy.physics.quantum import TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
First see what happens to products of tensor products:
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp(e)
(A*C)x(B*D)
    This is the core logic of this function, and it works inside powers, sums,
commutators and anticommutators as well:
>>> tensor_product_simp(e**2)
(A*C)x(B*D)**2
"""
if isinstance(e, Add):
return Add(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, Pow):
return tensor_product_simp(e.base) ** e.exp
elif isinstance(e, Mul):
return tensor_product_simp_Mul(e)
elif isinstance(e, Commutator):
return Commutator(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, AntiCommutator):
return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args])
else:
return e
| bsd-3-clause |
rvraghav93/scikit-learn | examples/neural_networks/plot_mlp_alpha.py | 47 | 4159 | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is the parameter of the regularization term (a.k.a. the penalty term), which
combats overfitting by constraining the size of the weights. Increasing alpha may fix
high variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision boundary with smaller curvature.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='black', s=25)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6, edgecolors='black', s=25)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
dmargala/qusp | examples/hist_lya_absorbers.py | 1 | 3214 | #!/usr/bin/env python
"""
"""
import argparse
import qusp
import numpy as np
import matplotlib.pyplot as plt
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="more verbose output")
parser.add_argument("--forest-min", type=float, default=1040,
help="wavelength of lya forest min")
parser.add_argument("--forest-max", type=float, default=1200,
help="wavelength of lya forest max")
parser.add_argument("--wave-lya", type=float, default=1216,
help="wavelength of lya line")
parser.add_argument("--z-col", type=int, default=1,
help="redshift column index")
parser.add_argument("--output", type=str, default="absorber_redshifts.png",
help="output file name")
parser.add_argument("--unweighted", action="store_true",
help="don't use ivar weighting")
qusp.Paths.add_args(parser)
qusp.target.add_args(parser)
args = parser.parse_args()
# setup boss data directory path
paths = qusp.Paths(**qusp.Paths.from_args(args))
# read target list
target_list = qusp.target.load_target_list_from_args(args,
fields=[('z', float, args.z_col)])
forest_min = qusp.wavelength.Wavelength(args.forest_min)
forest_max = qusp.wavelength.Wavelength(args.forest_max)
wave_lya = qusp.wavelength.Wavelength(args.wave_lya)
absorber_redshifts = []
absorber_weights = []
# loop over targets
for target, combined in qusp.target.get_combined_spectra(target_list, boss_path=paths.boss_path):
# determine observed frame forest window
obs_forest_min = forest_min.observed(target['z'])
obs_forest_max = forest_max.observed(target['z'])
# trim the combined spectrum to the forest window
try:
forest = combined.trim_range(obs_forest_min, obs_forest_max)
except ValueError, e:
# skip target if it's forest is not observable
print e, '(z = %.2f)' % target['z']
continue
# calculate absorber redshifts and weights
absorber_z = forest.wavelength/wave_lya - 1
absorber_weight = forest.ivar.values
# save this absorbers for this target
absorber_redshifts.append(absorber_z)
absorber_weights.append(absorber_weight)
absorber_redshifts = np.concatenate(absorber_redshifts)
if args.unweighted:
absorber_weights = np.ones_like(absorber_redshifts)
else:
absorber_weights = np.concatenate(absorber_weights)
if args.verbose:
print 'Number of absorbers: %d' % absorber_redshifts.shape[0]
print 'Mean number per target: %.2f' % (absorber_redshifts.shape[0]/len(target_list))
print 'Mean absorber redshift: %.4f' % np.mean(absorber_redshifts)
if args.output:
fig = plt.figure(figsize=(8,6))
plt.hist(absorber_redshifts, weights=absorber_weights, bins=50, linewidth=.1, alpha=.5)
plt.xlabel(r'Absorber Redshifts')
plt.grid()
fig.savefig(args.output, bbox_inches='tight')
if __name__ == '__main__':
main()
| mit |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/IPython/kernel/zmq/kernelapp.py | 2 | 18662 | """An Application for launching a kernel
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Standard library imports
import atexit
import json
import os
import sys
import signal
# System library imports
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream
# IPython imports
from IPython.core.ultratb import FormattedTB
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.utils import io
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils.path import filefind
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, CaselessStrEnum,
DottedObjectName,
)
from IPython.utils.importstring import import_item
from IPython.kernel import write_connection_file
# local imports
from heartbeat import Heartbeat
from ipkernel import Kernel
from parentpoller import ParentPollerUnix, ParentPollerWindows
from session import (
Session, session_flags, session_aliases, default_secure,
)
from zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'parent': 'IPKernelApp.parent_handle',
'transport': 'IPKernelApp.transport',
})
if sys.platform.startswith('win'):
kernel_aliases['interrupt'] = 'IPKernelApp.interrupt'
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp):
name='ipkernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = DottedObjectName('IPython.kernel.zmq.ipkernel.Kernel', config=True,
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""")
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat)
session = Instance('IPython.kernel.zmq.session.Session')
ports = Dict()
# ipkernel doesn't get its own config file
def _config_file_name_default(self):
return 'ipython_config.py'
# inherit config file name from parent:
parent_appname = Unicode(config=True)
def _parent_appname_changed(self, name, old, new):
if self.config_file_specified:
# it was manually specified, ignore
return
self.config_file_name = new.replace('-','_') + u'_config.py'
# don't let this count as specifying the config file
self.config_file_specified.remove(self.config_file_name)
# connection info:
transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True)
ip = Unicode(config=True,
help="Set the IP or interface on which the kernel will listen.")
def _ip_default(self):
if self.transport == 'ipc':
if self.connection_file:
return os.path.splitext(self.abs_connection_file)[0] + '-ipc'
else:
return 'kernel-ipc'
else:
return LOCALHOST
hb_port = Integer(0, config=True, help="set the heartbeat port [default: random]")
shell_port = Integer(0, config=True, help="set the shell (ROUTER) port [default: random]")
iopub_port = Integer(0, config=True, help="set the iopub (PUB) port [default: random]")
stdin_port = Integer(0, config=True, help="set the stdin (ROUTER) port [default: random]")
control_port = Integer(0, config=True, help="set the control (ROUTER) port [default: random]")
connection_file = Unicode('', config=True,
help="""JSON file in which to store connection info [default: kernel-<pid>.json]
This file will contain the IP, ports, and authentication key needed to connect
clients to this kernel. By default, this file will be created in the security dir
of the current profile, but can be specified by absolute path.
""")
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, config=True, help="redirect stdout to the null device")
no_stderr = Bool(False, config=True, help="redirect stderr to the null device")
outstream_class = DottedObjectName('IPython.kernel.zmq.iostream.OutStream',
config=True, help="The importstring for the OutStream factory")
displayhook_class = DottedObjectName('IPython.kernel.zmq.displayhook.ZMQDisplayHook',
config=True, help="The importstring for the DisplayHook factory")
# polling
parent_handle = Integer(0, config=True,
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""")
interrupt = Integer(0, config=True,
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""")
def init_crash_handler(self):
# Install minimal exception handling
sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor',
ostream=sys.__stdout__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle:
self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
iface = '%s://%s' % (self.transport, self.ip)
if self.transport == 'tcp':
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind("tcp://%s:%i" % (self.ip, port))
elif self.transport == 'ipc':
if port <= 0:
port = 1
path = "%s-%i" % (self.ip, port)
while os.path.exists(path):
port = port + 1
path = "%s-%i" % (self.ip, port)
else:
path = "%s-%i" % (self.ip, port)
s.bind("ipc://%s" % path)
return port
def load_connection_file(self):
"""load ip/port/hmac config from JSON connection file"""
try:
fname = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, so I will clean it up:
atexit.register(self.cleanup_connection_file)
return
self.log.debug(u"Loading connection file %s", fname)
with open(fname) as f:
s = f.read()
cfg = json.loads(s)
self.transport = cfg.get('transport', self.transport)
if self.ip == self._ip_default() and 'ip' in cfg:
# not overridden by config or cl_args
self.ip = cfg['ip']
for channel in ('hb', 'shell', 'iopub', 'stdin', 'control'):
name = channel + '_port'
if getattr(self, name) == 0 and name in cfg:
# not overridden by config or cl_args
setattr(self, name, cfg[name])
if 'key' in cfg:
self.config.Session.key = str_to_bytes(cfg['key'])
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
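    # The file written above is a small JSON blob, roughly of the form (the key
    # set shown is a sketch matching the arguments passed to
    # write_connection_file above):
    #   {"transport": "tcp", "ip": "127.0.0.1", "key": "<hmac-key>",
    #    "shell_port": <int>, "iopub_port": <int>, "stdin_port": <int>,
    #    "control_port": <int>, "hb_port": <int>}
    # Clients can then attach with e.g. `ipython console --existing <file>`.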
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def cleanup_ipc_files(self):
"""cleanup ipc files if we wrote them"""
if self.transport != 'ipc':
return
for port in (self.shell_port, self.iopub_port, self.stdin_port, self.hb_port, self.control_port):
ipcfile = "%s-%i" % (self.ip, port)
try:
os.remove(ipcfile)
except (IOError, OSError):
pass
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
# use shortname
tail = basename
if self.profile != 'default':
tail += " --profile %s" % self.profile
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
if not self.parent_handle:
io.rprint(_ctrl_c_message)
for line in lines:
io.rprint(line)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_session(self):
"""create our session object"""
default_secure(self.config)
self.session = Session(parent=self, username=u'kernel')
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = import_item(str(self.kernel_class))
kernel = kernel_factory(parent=self, session=self.session,
shell_streams=[shell_stream, control_stream],
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports(self.ports)
self.kernel = kernel
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace pyerr-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=io.stderr)
print (shell.InteractiveTB.stb2text(stb), file=io.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = self.kernel.shell
self.shell.configurables.append(self)
@catch_config_error
def initialize(self, argv=None):
super(IPKernelApp, self).initialize(argv)
self.init_blackhole()
self.init_connection_file()
self.init_session()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.log_connection_info()
self.write_connection_file()
self.init_io()
self.init_signal()
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
self.init_gui_pylab()
self.init_extensions()
self.init_code()
# flush stdout/stderr, so that anything written to these streams during
# initialization do not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()
| bsd-3-clause |
fyffyt/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
zorojean/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
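# For independent random labelings like these, chance-adjusted indices (ARI,
# AMI) are expected to stay close to 0 regardless of the number of clusters,
# which is what the test below asserts; unadjusted scores typically drift
# upwards as the number of clusters grows.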
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
sbustreamspot/sbustreamspot-baselines | distributions/pmf.py | 1 | 5813 | #!/usr/bin/env python
# Copyright 2016 Emaad Ahmed Manzoor
# License: Apache License, Version 2.0
# https://github.com/sbustreamspot/sbustreamspot-baselines
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from params import *
from distribution import *
from utils import split_train_test, pr_curve
from sklearn.neighbors import KernelDensity
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import LeaveOneOut
from sklearn.metrics import precision_recall_curve, average_precision_score
import sys
import random
random.seed(SEED)
np.random.seed(SEED)
# arguments
feat_file = sys.argv[1]
feat_name = sys.argv[2]
binwidth = float(sys.argv[3])
train_frac = float(sys.argv[4])
bandwidth = -1.0
if len(sys.argv) > 5:
bandwidth = float(sys.argv[5])
# train-test split
train_gids, test_gids = split_train_test(BENIGN_SCENARIOS, MALICIOUS_SCENARIOS,
train_frac)
train_gids = set(train_gids)
test_gids = set(test_gids)
# collect feature values from file
train_feature_values = []
test_feature_values = []
with open(feat_file, 'r') as f:
# skip 3 header lines
next(f)
next(f)
next(f)
for line in f:
fields = line.strip().split('\t')
gid = int(fields[0])
if feat_name == "density":
feat_value = float(fields[1]) * 10000
else:
feat_value = float(fields[1])
scenario = gid/100
if gid in train_gids:
train_feature_values.append((gid, feat_value))
elif gid in test_gids:
test_feature_values.append((gid, feat_value))
else:
print "ERROR"
sys.exit(-1)
train_values = np.array(sorted([fval
for gid, fval
in train_feature_values])).reshape(-1,1)
# compute histogram
minval = min(train_values)
maxval = max(train_values)
nbins = (maxval - minval)/binwidth
bins = np.arange(minval, maxval+binwidth+binwidth, binwidth)
a = np.arange(len(bins))
hist, _ = np.histogram(train_values, bins=bins)
nevents = len(bins) - 1
total = np.sum(hist)
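# Add-one (Laplace) smoothing: each bin gets a pseudo-count of 1 so that test
# values falling in unseen bins still receive a small non-zero probability
# (see the fallback 1.0/(total + nevents) used in the scoring loop below).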
pmf = {bins[i]: (float(hist[i]) + 1.0) / (total + nevents) for i in range(len(hist))}
#print hist
#print pmf
#print bins
"""
# plot histogram
colours = ["#348ABD", "#A60628"]
plt.figure(figsize=(16,4))
plt.hold(True)
plt.bar(left=bins[:-1], width=binwidth, height=pmf, color=colours[0],
label='PMF', alpha=0.6, edgecolor=colours[0], lw="3")
# plot rug data points
ymin, ymax = plt.ylim()
ycenter = (ymax - ymin) * 0.8
plt.plot(train_values, [ycenter]*len(train_values), '|', color='k',
label='Training feature values')
test_values = np.array([feat_val
for gid, feat_val in test_feature_values]).reshape(-1,1)
test_malicious_values = [feat_val for gid, feat_val in test_feature_values
if gid/100 in MALICIOUS_SCENARIOS]
test_benign_values = [feat_val for gid, feat_val in test_feature_values
if gid/100 in BENIGN_SCENARIOS]
plt.plot(test_benign_values, [ycenter]*len(test_benign_values), '|',
color=colours[0], label='Test feature values (benign)')
plt.plot(test_malicious_values, [ycenter]*len(test_malicious_values), '|',
color='red', label='Test feature values (malicious)')
plt.legend(loc='best')
plt.xlim(bins[0]-binwidth, bins[-1]+binwidth)
plt.xticks(bins, bins.flatten(), rotation=45)
plt.savefig('pmf-' + feat_name + '.pdf', bbox_inches='tight')
plt.clf()
plt.close()
"""
# scores for all points
# NOTE: 'colours' was only defined inside the commented-out plotting block
# above, so it is (re)declared here; the extra palette entries are an
# assumption to cover one curve per malicious scenario.
colours = ["#348ABD", "#A60628", "#7A68A6", "#467821", "#CC6633"]
for i, scenario in enumerate(MALICIOUS_SCENARIOS):
all_feature_values = train_feature_values + \
[(gid, feat_value)
for gid, feat_value in test_feature_values
if gid/100 in BENIGN_SCENARIOS or
gid/100 == scenario]
all_values = np.array([feat_value
for gid, feat_value in all_feature_values]).reshape(-1,1)
y_true = [1 if gid/100 in MALICIOUS_SCENARIOS else 0
for gid, feat_value in all_feature_values]
anomaly_scores = []
for value in all_values.flatten():
if value in pmf:
anomaly_scores.append(-pmf[value])
else:
prob = (0.0 + 1.0) / (total + nevents)
anomaly_scores.append(-prob)
# plot my own PR curve
precision, recall, ap = pr_curve(y_true, anomaly_scores)
print 'Scenario:', scenario, ap
plt.figure()
plt.plot(recall, precision, label='AUC (Scenario ' + str(scenario) + \
')={0:0.3f}'.format(ap), color=colours[i+1])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.legend()
plt.savefig('pr-' + feat_name + str(scenario) + '.pdf', bbox_inches='tight')
plt.clf()
plt.close()
#print all_values.flatten()[300:400]
#print anomaly_scores[300:400]
"""
# visualise anomaly scores
for i in range(len(test_feature_values)):
gid, feat_value = test_feature_values[i]
p = inv_anomaly_scores[i]
if gid/100 in BENIGN_SCENARIOS:
color = colours[0]
else:
color = colours[1]
plt.plot((feat_value, feat_value), (ycenter, ycenter + p),
'-', color=color)
"""
"""
# plot PR curve
precision, recall, _ = precision_recall_curve(y_true=y_true,
probas_pred=anomaly_scores)
ap = average_precision_score(y_true, anomaly_scores)
print precision
print recall
print ap
plt.figure()
plt.plot(recall, precision, label='AUC={0:0.2f}'.format(ap))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.legend()
plt.savefig('pr-' + feat_name + '.pdf', bbox_inches='tight')
plt.clf()
plt.close()
"""
| apache-2.0 |
jzt5132/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
rvraghav93/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 33 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore so
are the corresponding Mahalanobis distances. It is better to use a robust
estimator of covariance to guarantee that the estimation is resistant to
"erroneous" observations in the data set and that the associated
Mahalanobis distances accurately reflect the true organisation of the
observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
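# Illustrative check (an added sketch, not part of the original example): the
# mahalanobis() method returns the *squared* distance
# d^2(x) = (x - mu)' Sigma^{-1} (x - mu), which is why np.sqrt() is applied
# before contouring further down. Reproducing it for one sample with plain numpy:
x0 = X[0] - emp_cov.location_
d2_manual = np.dot(np.dot(x0, np.linalg.inv(emp_cov.covariance_)), x0)
assert np.isclose(d2_manual, emp_cov.mahalanobis(X[:1])[0])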
# #############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_lcmv_beamformer.py | 3 | 3174 | """
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer solutions on an evoked dataset for three different
choices of source orientation and store the solutions in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
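# The noise covariance comes from the pre-stimulus baseline (tmin..0 s) and the
# data covariance from the post-stimulus window (0.04..0.15 s); the LCMV
# beamformer below needs both to build its spatial filters.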
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.05,
pick_ori=pick_ori)
# View activation time-series
label = mne.read_label(fname_label)
stc_label = stc.in_label(label)
plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
hold=True, label=desc)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
# Plot last stc in the brain in 3D with PySurfer if available
brain = stc.plot(hemi='lh', subjects_dir=subjects_dir,
initial_time=0.1, time_unit='s')
brain.show_view('lateral')
| bsd-3-clause |
bavardage/statsmodels | statsmodels/tools/print_version.py | 3 | 4607 | #!/usr/bin/env python
import sys
from os.path import dirname
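# safe_version walks a (possibly nested) attribute path on a module, e.g.
# safe_version(numpy, ['version', 'version']) -> numpy.version.version, and
# falls back to a placeholder string when the attribute chain is missing.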
def safe_version(module, attr='__version__'):
if not isinstance(attr, list):
attr = [attr]
try:
return reduce(getattr, [module] + attr)
except AttributeError:
return "Cannot detect version"
def show_versions():
print("\nINSTALLED VERSIONS")
print("------------------")
print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
try:
import os
(sysname, nodename, release, version, machine) = os.uname()
print("OS: %s %s %s %s" % (sysname, release, version,machine))
print("byteorder: %s" % sys.byteorder)
print("LC_ALL: %s" % os.environ.get('LC_ALL',"None"))
print("LANG: %s" % os.environ.get('LANG',"None"))
except:
pass
try:
import statsmodels
from statsmodels import version
has_sm = True
except ImportError:
has_sm = False
print('\nStatsmodels\n===========\n')
if has_sm:
print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
dirname(statsmodels.__file__)))
else:
print('Not installed')
print("\nRequired Dependencies\n=====================\n")
try:
import Cython
print("cython: %s (%s)" % (safe_version(Cython),
dirname(Cython.__file__)))
except ImportError:
print("cython: Not installed")
try:
import numpy
print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
dirname(numpy.__file__)))
except ImportError:
print("numpy: Not installed")
try:
import scipy
print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
dirname(scipy.__file__)))
except ImportError:
print("scipy: Not installed")
try:
import pandas
print("pandas: %s (%s)" % (safe_version(pandas, ['version',
'version']),
dirname(pandas.__file__)))
except ImportError:
print("pandas: Not installed")
try:
import dateutil
print(" dateutil: %s (%s)" % (safe_version(dateutil),
dirname(dateutil.__file__)))
except ImportError:
print(" dateutil: not installed")
try:
import patsy
print("patsy: %s (%s)" % (safe_version(patsy),
dirname(patsy.__file__)))
except ImportError:
print("patsy: Not installed")
print("\nOptional Dependencies\n=====================\n")
try:
import matplotlib as mpl
print("matplotlib: %s (%s)" % (safe_version(mpl),
dirname(mpl.__file__)))
except ImportError:
print("matplotlib: Not installed")
try:
from cvxopt import info
print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
dirname(info.__file__)))
except ImportError:
print("cvxopt: Not installed")
print("\nDeveloper Tools\n================\n")
try:
import IPython
print("IPython: %s (%s)" % (safe_version(IPython),
dirname(IPython.__file__)))
except ImportError:
print("IPython: Not installed")
try:
import jinja2
print(" jinja2: %s (%s)" % (safe_version(jinja2),
dirname(jinja2.__file__)))
except ImportError:
print(" jinja2: Not installed")
try:
import sphinx
print("sphinx: %s (%s)" % (safe_version(sphinx),
dirname(sphinx.__file__)))
except ImportError:
print("sphinx: Not installed")
try:
import pygments
print(" pygments: %s (%s)" % (safe_version(pygments),
dirname(pygments.__file__)))
except ImportError:
print(" pygments: Not installed")
try:
import nose
print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
except ImportError:
print("nose: Not installed")
try:
import virtualenv
print("virtualenv: %s (%s)" % (safe_version(virtualenv),
dirname(virtualenv.__file__)))
except ImportError:
print("virtualenv: Not installed")
print("\n")
if __name__ == "__main__":
show_versions()
| bsd-3-clause |
yuantw/MachineLearning | scikit-learn/classifications.py | 1 | 3066 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 23:59:21 2015
@author: yuantengwei
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
df = pd.read_csv('csv/train.csv')
df = df.fillna('0')
exc_cols = [u'PassengerId', u'Survived', u'Name']
cols = [c for c in df.columns if c not in exc_cols]
x = df.ix[:, cols]
y = df['Survived'].values
from sklearn.feature_extraction import DictVectorizer
v = DictVectorizer()
x = v.fit_transform(x.to_dict(outtype='records')).toarray()
print('Vectorized:', x[10])
print('Unvectorized:', v.inverse_transform(x[10]))
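# DictVectorizer one-hot encodes string-valued (categorical) fields and passes
# numeric fields through unchanged, so every row becomes a fixed-length vector.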
from sklearn.cross_validation import train_test_split
data_train, data_test, target_train, target_test = train_test_split(x, y)
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.externals import joblib
import datetime
import os
estimators = {}
estimators['bayes'] = GaussianNB()
estimators['tree'] = tree.DecisionTreeClassifier()
estimators['forest_100'] = RandomForestClassifier(n_estimators = 100)
estimators['forest_10'] = RandomForestClassifier(n_estimators = 10)
estimators['svm_c_rbf'] = svm.SVC()
estimators['svm_c_linear'] = svm.SVC(kernel='linear')
estimators['svm_linear'] = svm.LinearSVC()
estimators['svm_nusvc'] = svm.NuSVC()
import csv
scoreFile = open('csv/scorefile.txt', 'w', newline='')
#csvWriter = csv.writer(scoreFile, delimiter =',', quotechar ='"', quoting=csv.QUOTE_MINIMAL)
for k in estimators.keys():
start_time = datetime.datetime.now()
print( '----%s----' % k)
scoreFile.write( '----{}----\r\n'.format(k))
estimators[k] = estimators[k].fit(data_train, target_train)
pred = estimators[k].predict(data_test)
print("%s Score: %0.2f" % (k, estimators[k].score(data_test, target_test)))
scoreFile.write("%s Score: %0.2f \r\n" % (k, estimators[k].score(data_test, target_test)))
scores = cross_validation.cross_val_score(estimators[k], data_test, target_test, cv=5)
print("%s Cross Avg. Score: %0.2f (+/- %0.2f)" % (k, scores.mean(), scores.std() * 2))
scoreFile.write("%s Cross Avg. Score: %0.2f (+/- %0.2f) \r\n" % (k, scores.mean(), scores.std() * 2))
end_time = datetime.datetime.now()
time_spend = end_time - start_time
print("%s Time: %0.2f" % (k, time_spend.total_seconds()))
scoreFile.write("%s Time: %0.2f \r\n" % (k, time_spend.total_seconds()))
#save model into respective folders
print(k)
print(os.curdir)
if not os.path.exists(k):
os.makedirs(k)
joblib.dump(estimators[k], '{}/{}.pkl'.format(k,k))
scoreFile.close()  # the scores file was never closed; flush it before predicting
test = pd.read_csv('csv/test.csv')
test = test.fillna(0)
test_d = test.to_dict(outtype='records')
test_vec = v.transform(test_d).toarray()
for k in estimators.keys():
estimators[k] = estimators[k].fit(x, y)
pred = estimators[k].predict(test_vec)
test['Survived'] = pred
test.to_csv('csv/'+ k + '.csv', cols=['Survived', 'PassengerId'], index=False)
| mit |
ProstoMaxim/incubator-airflow | airflow/hooks/dbapi_hook.py | 14 | 9338 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
conn=conn, login=login, host=host)
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
self.log.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
conn.commit()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
l = []
for cell in row:
l.append(self._serialize_cell(cell, conn))
values = tuple(l)
placeholders = ["%s",]*len(values)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded {i} into {table} rows so far".format(**locals())
)
conn.commit()
self.log.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
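# Illustrative sketch (not part of Airflow): a concrete hook mainly has to
# point conn_name_attr at its connection-id field and return a DB-API
# connection. The class below is hypothetical.
#
# class SqliteHookSketch(DbApiHook):
#     conn_name_attr = 'sqlite_conn_id'
#     default_conn_name = 'sqlite_default'
#     supports_autocommit = True
#
#     def get_conn(self):
#         import sqlite3
#         db = self.get_connection(getattr(self, self.conn_name_attr))
#         return sqlite3.connect(db.host)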
| apache-2.0 |
psi4/DatenQM | benchmarks/bench_copy.py | 2 | 3260 | # def bench(factory, X, Y, X_test, Y_test, ref_coef):
# gc.collect()
#
# # start time
# tstart = time()
# clf = factory(alpha=alpha).fit(X, Y)
# delta = (time() - tstart)
# # stop time
#
# print("duration: %0.3fs" % delta)
# print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
# print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
# return delta
#
#
# if __name__ == '__main__':
# # Delayed import of matplotlib.pyplot
# import matplotlib.pyplot as plt
#
# scikit_results = []
#
# n = 20
# step = 500
# n_features = 1000
# n_informative = int(n_features / 10)
# n_test_samples = 1000
# for i in range(1, n + 1):
# print('==================')
# print('Iteration %s of %s' % (i, n))
# print('==================')
#
# X, Y, coef_ = make_regression(
# n_samples=(i * step) + n_test_samples, n_features=n_features,
# noise=0.1, n_informative=n_informative, coef=True)
#
# X_test = X[-n_test_samples:]
# Y_test = Y[-n_test_samples:]
# X = X[:(i * step)]
# Y = Y[:(i * step)]
#
# print("benchmarking scikit-learn: ")
# scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
# print("benchmarking glmnet: ")
# # glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
#
# plt.clf()
# xx = range(0, n * step, step)
# plt.title('Lasso regression on sample dataset (%d features)' % n_features)
# plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
# # plt.plot(xx, glmnet_results, 'r-', label='glmnet')
# plt.legend()
# plt.xlabel('number of samples to classify')
# plt.ylabel('Time (s)')
# plt.show()
#
# # now do a benchmark where the number of points is fixed
# # and the variable is the number of features
#
# scikit_results = []
# # glmnet_results = []
# n = 20
# step = 100
# n_samples = 500
#
# for i in range(1, n + 1):
# print('==================')
# print('Iteration %02d of %02d' % (i, n))
# print('==================')
# n_features = i * step
# n_informative = n_features / 10
#
# X, Y, coef_ = make_regression(
# n_samples=(i * step) + n_test_samples, n_features=n_features,
# noise=0.1, n_informative=n_informative, coef=True)
#
# X_test = X[-n_test_samples:]
# Y_test = Y[-n_test_samples:]
# X = X[:n_samples]
# Y = Y[:n_samples]
#
# print("benchmarking scikit-learn: ")
# scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
# print("benchmarking glmnet: ")
# # glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
#
# xx = np.arange(100, 100 + n * step, step)
# plt.figure('scikit-learn vs. glmnet benchmark results')
# plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
# plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
# # plt.plot(xx, glmnet_results, 'r-', label='glmnet')
# plt.legend()
# plt.xlabel('number of features')
# plt.ylabel('Time (s)')
# plt.axis('tight')
# plt.show() | bsd-3-clause |
dsockwell/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
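    # ichart query parameters (legacy, now-retired Yahoo endpoint): s=symbol,
    # a/b/c = start month (0-based), start day, start year and d/e/f = the
    # same for the end date; sDate/eDate are (year, month, day) tuples, hence
    # the [1]-1, [2], [0] ordering above.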
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
MPTCP-smartphone-thesis/pcap-measurement | cdf_duration_bytes.py | 1 | 11276 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Quentin De Coninck
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# To install on this machine: matplotlib, numpy
from __future__ import print_function
import argparse
import common as co
import common_graph as cog
import matplotlib
# Do not use any X11 backend
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import mptcp
import numpy as np
import os
import tcp
##################################################
## ARGUMENTS ##
##################################################
parser = argparse.ArgumentParser(
description="Summarize stat files generated by analyze")
parser.add_argument("-s",
"--stat", help="directory where the stat files are stored", default=co.DEF_STAT_DIR + '_' + co.DEF_IFACE)
parser.add_argument('-S',
"--sums", help="directory where the summary graphs will be stored", default=co.DEF_SUMS_DIR + '_' + co.DEF_IFACE)
parser.add_argument("-d",
"--dirs", help="list of directories to aggregate", nargs="+")
args = parser.parse_args()
stat_dir_exp = os.path.abspath(os.path.expanduser(args.stat))
sums_dir_exp = os.path.abspath(os.path.expanduser(args.sums))
co.check_directory_exists(sums_dir_exp)
##################################################
## GET THE DATA ##
##################################################
connections = cog.fetch_valid_data(stat_dir_exp, args)
multiflow_connections, singleflow_connections = cog.get_multiflow_connections(connections)
##################################################
## PLOTTING RESULTS ##
##################################################
def plot(connections, multiflow_connections, sums_dir_exp):
ALERT_DURATION = 3600
ALERT_BYTES = 50000000
data_duration = []
subflows_duration = []
data_bytes = []
subflows_bytes = []
data_packets = []
ports_11_bytes = {}
color = 'red'
base_graph_name_duration = "summary_cdf_duration"
base_graph_path_duration = os.path.join(sums_dir_exp, base_graph_name_duration)
base_graph_name_bytes = "summary_cdf_bytes"
base_graph_path_bytes = os.path.join(sums_dir_exp, base_graph_name_bytes)
max_duration = 0.0
fname_max_duration = None
conn_id_max_duration = None
max_bytes = 0
fname_max_bytes = None
conn_id_max_bytes = None
for fname, conns in connections.iteritems():
for conn_id, conn in conns.iteritems():
if isinstance(conn, mptcp.MPTCPConnection) and co.DURATION in conn.attr:
duration = conn.attr[co.DURATION]
bytes = 0
data_pkts = 0
for direction in co.DIRECTIONS:
bytes += conn.attr[direction][co.BYTES_MPTCPTRACE]
if co.BURSTS in conn.attr[direction]:
for flow_id, seq_burst, pkt_burst, seq_duration, time in conn.attr[direction][co.BURSTS]:
data_pkts += pkt_burst
if bytes == 11:
if co.SOCKS_PORT in conn.attr:
socks_port = conn.attr[co.SOCKS_PORT]
if socks_port not in ports_11_bytes:
ports_11_bytes[socks_port] = 1
else:
ports_11_bytes[socks_port] += 1
if duration >= ALERT_DURATION:
print("DURATION", fname, conn_id, duration)
if bytes >= ALERT_BYTES:
print("BYTES", fname, conn_id, bytes)
if duration > max_duration:
max_duration = duration
fname_max_duration = fname
conn_id_max_duration = conn_id
if bytes > max_bytes:
max_bytes = bytes
fname_max_bytes = fname
conn_id_max_bytes = conn_id
data_duration.append(duration)
data_bytes.append(bytes)
data_packets.append(data_pkts)
for flow_id, flow in conn.flows.iteritems():
if co.DURATION in flow.attr:
subflows_duration.append(flow.attr[co.DURATION])
subflow_bytes = 0
for direction in co.DIRECTIONS:
subflow_bytes += flow.attr[direction][co.BYTES_DATA]
subflows_bytes.append(subflow_bytes)
# co.plot_cdfs_natural(data_duration, color, 'Seconds [s]', base_graph_path_duration)
# co.plot_cdfs_natural(data_duration, color, 'Seconds [s]', base_graph_path_duration + '_log', xlog=True)
print("MAX DURATION", max_duration, fname_max_duration, conn_id_max_duration)
print("MAX BYTES", max_bytes, fname_max_bytes, conn_id_max_bytes)
print("PERCENTAGE 0 s", len([x for x in data_duration if x <= 0.00001]) * 100.0 / len(data_duration))
print("PERCENTAGE <=10 s", len([x for x in data_duration if x <= 10.0]) * 100.0 / len(data_duration))
print("PERCENTAGE >=100 s", len([x for x in data_duration if x >= 100.0]) * 100.0 / len(data_duration))
print("PERCENTAGE >=1 day", len([x for x in data_duration if x >= 86400.0]) * 100.0 / len(data_duration))
print("NUMBER >=1 day", len([x for x in data_duration if x >= 86400.0]))
print("PERCENTAGE <= 1KB", len([x for x in data_bytes if x <= 1000]) * 100.0 / len(data_bytes))
print("PERCENTAGE <= 2KB", len([x for x in data_bytes if x <= 2000]) * 100.0 / len(data_bytes))
print("PERCENTAGE <= 5KB", len([x for x in data_bytes if x <= 5000]) * 100.0 / len(data_bytes))
print("PERCENTAGE <= 10KB", len([x for x in data_bytes if x <= 10000]) * 100.0 / len(data_bytes))
print("PERCENTAGE [9;11] B", len([x for x in data_bytes if x <= 11 and x >= 9]) * 100.0 / len(data_bytes))
print("PORTS 11 B", ports_11_bytes)
print("PERCENTAGE <= 2 packs", len([x for x in data_packets if x <= 2]) * 100.0 / len(data_packets))
print("PERCENTAGE <= 3 packs", len([x for x in data_packets if x <= 3]) * 100.0 / len(data_packets))
print("PERCENTAGE <= 5 packs", len([x for x in data_packets if x <= 5]) * 100.0 / len(data_packets))
print("PERCENTAGE <= 10 packs", len([x for x in data_packets if x <= 10]) * 100.0 / len(data_packets))
plt.figure()
plt.clf()
fig, ax = plt.subplots()
graph_fname = os.path.splitext(base_graph_path_duration)[0] + "_cdf_log.pdf"
sample = np.array(sorted(data_duration))
sorted_array = np.sort(sample)
yvals = np.arange(len(sorted_array)) / float(len(sorted_array))
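    # Empirical CDF: after sorting, the i-th value is assigned probability i/n;
    # the duplicated last point appended below closes the step function at 1.0.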
if len(sorted_array) > 0:
# Add a last point
sorted_array = np.append(sorted_array, sorted_array[-1])
yvals = np.append(yvals, 1.0)
ax.plot(sorted_array, yvals, color=color, linewidth=2, label="MPTCP Connections")
# Shrink current axis's height by 10% on the top
# box = ax.get_position()
# ax.set_position([box.x0, box.y0,
# box.width, box.height * 0.9])
# sample = np.array(sorted(subflows_duration))
# sorted_array = np.sort(sample)
# yvals = np.arange(len(sorted_array)) / float(len(sorted_array))
# if len(sorted_array) > 0:
# # Add a last point
# sorted_array = np.append(sorted_array, sorted_array[-1])
# yvals = np.append(yvals, 1.0)
# ax.plot(sorted_array, yvals, color='blue', linestyle='--', linewidth=2, label="Subflows")
# Shrink current axis's height by 10% on the top
# box = ax.get_position()
# ax.set_position([box.x0, box.y0,
# box.width, box.height * 0.9])
ax.set_xscale('log')
# Put a legend above current axis
# ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=ncol)
ax.legend(loc='lower right')
plt.xlabel('Time [s]', fontsize=24)
plt.ylabel("CDF", fontsize=24)
plt.savefig(graph_fname)
plt.close('all')
base_graph_path_duration_hist = os.path.splitext(base_graph_path_duration)[0] + '_hist'
plt.figure()
plt.hist(subflows_duration, bins=np.logspace(-3, 5, 81), log=True)
plt.xlabel("Duration of connections [s]", fontsize=24, labelpad=-2)
plt.ylabel("Connections", fontsize=24)
plt.gca().set_xscale("log")
plt.savefig(base_graph_path_duration_hist + "_log.pdf")
plt.close()
plt.figure()
plt.hist(subflows_duration, bins=np.logspace(-3, 5, 81))
plt.xlabel("Duration of connections [s]", fontsize=24, labelpad=-2)
plt.ylabel("Connections", fontsize=24)
# plt.gca().set_xscale("log")
plt.savefig(base_graph_path_duration_hist + ".pdf")
plt.close()
plt.figure()
plt.clf()
fig, ax = plt.subplots()
graph_fname = os.path.splitext(base_graph_path_bytes)[0] + "_cdf_log.pdf"
sample = np.array(sorted(data_bytes))
sorted_array = np.sort(sample)
yvals = np.arange(len(sorted_array)) / float(len(sorted_array))
if len(sorted_array) > 0:
# Add a last point
sorted_array = np.append(sorted_array, sorted_array[-1])
yvals = np.append(yvals, 1.0)
ax.plot(sorted_array, yvals, color=color, linewidth=2, label="MPTCP Connections")
# Shrink current axis's height by 10% on the top
# box = ax.get_position()
# ax.set_position([box.x0, box.y0,
# box.width, box.height * 0.9])
# sample = np.array(sorted(subflows_bytes))
# sorted_array = np.sort(sample)
# yvals = np.arange(len(sorted_array)) / float(len(sorted_array))
# if len(sorted_array) > 0:
# # Add a last point
# sorted_array = np.append(sorted_array, sorted_array[-1])
# yvals = np.append(yvals, 1.0)
# ax.plot(sorted_array, yvals, color='blue', linestyle='--', linewidth=2, label="Subflows")
# Shrink current axis's height by 10% on the top
# box = ax.get_position()
# ax.set_position([box.x0, box.y0,
# box.width, box.height * 0.9])
ax.set_xscale('log')
# Put a legend above current axis
# ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=ncol)
ax.legend(loc='lower right')
plt.xlabel('Bytes', fontsize=24, labelpad=-1)
plt.ylabel("CDF", fontsize=24)
plt.savefig(graph_fname)
plt.close('all')
plot(connections, multiflow_connections, sums_dir_exp)
| gpl-3.0 |
memo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
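    # y stacks two targets per sample (pi*sin(x) and pi*cos(x)), which is why
    # the regressor below is built with label_dimension=2.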
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
BIDS-collaborative/destress | word2vec/graphCounts.py | 2 | 1161 | """
graphCounts plots the histogram
of the counts (by reading in the outputed text file from countWords.py)
"""
from matplotlib import pyplot as plt
import numpy as np
if __name__ == '__main__':
outputCount = open('/Users/gyoo/BIDS/destress/word2vec/word_counts.txt', 'r')
# outputCount = open('/var/local/destress/text_sent_ids/word_counts.txt', 'r')
counts = outputCount.readlines()
outputCount.close()
wordCount = {}
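    # Each line of word_counts.txt is expected to look like
    # "<sentence_length> <...> <num_sentences>"; only the first and third
    # fields are used below (the middle field is ignored).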
for count in counts:
c = count.split()
cnt = int(c[0])
numSents = int(c[2])
wordCount[cnt] = numSents
points = []
for k, v in wordCount.iteritems():
print(k)
# if k <= 50:
for i in range(v):
points.append(k)
plt.title("Length (50 and Below) of Live Journal Sentences")
plt.xlabel("Length of sentence (in words)")
plt.ylabel("Frequency")
plt.hist(points, bins=50)
plt.yscale('log', nonposy='clip')
plt.show()
# plt.title("Length (50 and Below) of Live Journal Sentences")
# plt.xlabel("Length of sentence (in words)")
# plt.ylabel("Frequency")
# plt.hist(points, bins=50)
# plt.show()
| isc |
aabadie/scikit-learn | examples/plot_compare_reduction.py | 19 | 2489 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of GridSearchCV and
Pipeline to optimize over different classes of estimators in a
single CV run -- unsupervised PCA and NMF dimensionality
reductions are compared to univariate feature selection during
the grid search.
"""
# Authors: Robert McGibbon, Joel Nothman
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
print(__doc__)
pipe = Pipeline([
('reduce_dim', PCA()),
('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
]
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, cv=3, n_jobs=2, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
SNeugber/OpenVault | Plotting/timeStopped.py | 1 | 3376 | import sys
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
from mpl_toolkits.mplot3d import Axes3D as a3
from matplotlib.collections import PolyCollection
# Functs
do_avg = np.vectorize(np.average)
total_stopped = np.vectorize(lambda x : sum([1 for i in x if i < 0.1])/10.0) # Number of 0.1s timesteps (hence the division) at zero speed
def getData(path,filename):
# Read data from vector csvs
vec_files = glob.glob(path+'/'+filename+'*.vec') # List of all vector files
runs = sorted([int(f[f.find('-')+1:f.find('.')]) for f in vec_files]) # Run indeces of available vector files
allData=[]
for run in runs:
nodeSpeedVectors=dict()
nodeSpeeds=dict()
f=open(path+'/'+filename+'-'+str(run)+'.vec')
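        # OMNeT++ .vec files mix declaration lines ("vector <id> <module> <name> ...")
        # with data lines ("<vector-id> <event> <time> <value>"); keep only the ids
        # of per-node 'speed' vectors and collect their sampled values.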
for line in f:
if line.strip() == "": continue
line=line.split()
if len(line) < 4: continue
if line[0]=='vector' and line[3]=='speed':
node=int(line[2][line[2].find('[')+1:line[2].find(']')])
vector=int(line[1])
nodeSpeedVectors[vector]=node
nodeSpeeds[node]=[]
else:
try:
vector=int(line[0])
if vector in nodeSpeedVectors:
nodeSpeeds[nodeSpeedVectors[vector]].append(float(line[3]))
except ValueError:
pass
allData.append([v for (k,v) in sorted(nodeSpeeds.iteritems())])
return total_stopped(np.array(allData,dtype=object))
c1Data=getData('1car','testSpawnRateConfig5')
c2Data=getData('2car','testSpawnRateConfig6')
c3Data=getData('3car','testSpawnRateConfig7')
c3unevenData=getData('3car_uneven','testSpawnRateConfig7')
c4Data=getData('4car','testSpawnRateConfig8')
c5Data=getData('5cars','testSpawnRateConfig1')
c10Data=getData('10cars','testSpawnRateConfig2')
c50Data=getData('50cars','testSpawnRateConfig3')
c100Data=getData('100cars','testSpawnRateConfig4')
#print c5Data
avgs=[]
avgs.append(np.average(c1Data))
avgs.append(np.average(c2Data))
avgs.append(np.average(c3Data))
avgs.append(np.average(c3unevenData))
avgs.append(np.average(c4Data))
avgs.append(np.average(c5Data))
avgs.append(np.average(c10Data))
avgs.append(np.average(c50Data))
avgs.append(np.average(c100Data))
xs=[1,2,3,3.5,4,5,10,50,100]
#print xs
#print avgs
fig = plt.figure()
plt.suptitle('Length of traffic jams measured as average stationary time')
ax=fig.add_subplot(111)
l1=ax.scatter(xs,avgs)
ax.plot(xs,avgs)
ax.set_xscale('log')
ax.set_xlim(xmin=0.9,xmax=110)
ax.xaxis.set_major_formatter(tkr.ScalarFormatter())
ax.set_xlabel('Insertion frequency (s)')
ax.set_ylabel('Average stationary time (s)')
fig = plt.figure()
t=plt.title('Progression of stopping times over all cars')
ax = fig.add_subplot(111)
l1=ax.plot(np.average(c1Data,axis=1),label='1')
l2=ax.plot(np.average(c2Data,axis=1),label='2')
l3=ax.plot(np.average(c3Data,axis=1),label='3')
l9=ax.plot(np.average(c3unevenData,axis=1),label='3.5')
l4=ax.plot(np.average(c4Data,axis=1),label='4')
l5=ax.plot(np.average(c5Data,axis=1),label='5')
l6=ax.plot(np.average(c10Data,axis=1),label='10')
l7=ax.plot(np.average(c50Data,axis=1),label='50')
l8=ax.plot(np.average(c100Data,axis=1),label='100')
ax.set_xticks(range(0,np.shape(c5Data)[0]+1,10))
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
#ax.legend([l1,l2,l3,l4,l5,l6,l7,l8,l9],['1','2','3','4','5','10','50','100''3.5'],numpoints=1, loc=0, prop={'size':10})
#ax.set_yticks(range(0,200,10))
#ax.set_ylim(ymin=-10)
plt.show() | apache-2.0 |
markovg/nest-simulator | pynest/nest/voltage_trace.py | 18 | 7823 | # -*- coding: utf-8 -*-
#
# voltage_trace.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to plot voltage traces.
"""
import nest
import numpy
import pylab
def from_file(fname, title=None, grayscale=False):
"""Plot voltage trace from file.
Parameters
----------
fname : str or list
Filename or list of filenames to load from
title : str, optional
Plot title
grayscale : bool, optional
Plot in grayscale
Raises
------
ValueError
"""
if nest.is_iterable(fname):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
if grayscale:
line_style = "k"
else:
line_style = ""
if len(data.shape) == 1:
print("INFO: only found 1 column in the file. \
Assuming that only one neuron was recorded.")
plotid = pylab.plot(data, line_style)
pylab.xlabel("Time (steps of length interval)")
elif data.shape[1] == 2:
print("INFO: found 2 columns in the file. Assuming \
them to be gid, pot.")
plotid = []
data_dict = {}
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[1]]
else:
data_dict[d[0]].append(d[1])
for d in data_dict:
plotid.append(
pylab.plot(data_dict[d], line_style, label="Neuron %i" % d)
)
pylab.xlabel("Time (steps of length interval)")
pylab.legend()
elif data.shape[1] == 3:
plotid = []
data_dict = {}
g = data[0][0]
t = []
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[2]]
else:
data_dict[d[0]].append(d[2])
if d[0] == g:
t.append(d[1])
for d in data_dict:
plotid.append(
pylab.plot(t, data_dict[d], line_style, label="Neuron %i" % d)
)
pylab.xlabel("Time (ms)")
pylab.legend()
else:
raise ValueError("Inappropriate data shape %i!" % data.shape)
if not title:
title = "Membrane potential from file '%s'" % fname
pylab.title(title)
pylab.ylabel("Membrane potential (mV)")
pylab.draw()
return plotid
def from_device(detec, neurons=None, title=None, grayscale=False,
timeunit="ms"):
"""Plot the membrane potential of a set of neurons recorded by
the given Voltmeter or Multimeter.
Parameters
----------
detec : list
Global id of Voltmeter or Multimeter in a list, e.g. [1]
neurons : list, optional
Indices of of neurons to plot
title : str, optional
Plot title
grayscale : bool, optional
Plot in grayscale
timeunit : str, optional
Unit of time
Raises
------
nest.NESTError
        If the device is not a voltmeter or a multimeter recording V_m.
"""
if len(detec) > 1:
raise nest.NESTError("Please provide a single voltmeter.")
if not nest.GetStatus(detec)[0]['model'] in ('voltmeter', 'multimeter'):
raise nest.NESTError("Please provide a voltmeter or a \
multimeter measuring V_m.")
elif nest.GetStatus(detec)[0]['model'] == 'multimeter':
if "V_m" not in nest.GetStatus(detec, "record_from")[0]:
raise nest.NESTError("Please provide a multimeter \
measuring V_m.")
elif (not nest.GetStatus(detec, "to_memory")[0] and
len(nest.GetStatus(detec, "record_from")[0]) > 1):
raise nest.NESTError("Please provide a multimeter measuring \
only V_m or record to memory!")
if nest.GetStatus(detec, "to_memory")[0]:
timefactor = 1.0
if not nest.GetStatus(detec)[0]['time_in_steps']:
if timeunit == "s":
timefactor = 1000.0
else:
timeunit = "ms"
times, voltages = _from_memory(detec)
if not len(times):
raise nest.NESTError("No events recorded! Make sure that \
withtime and withgid are set to True.")
if neurons is None:
neurons = voltages.keys()
plotids = []
for neuron in neurons:
time_values = numpy.array(times[neuron]) / timefactor
if grayscale:
line_style = "k"
else:
line_style = ""
try:
plotids.append(
pylab.plot(time_values, voltages[neuron],
line_style, label="Neuron %i" % neuron)
)
except KeyError:
print("INFO: Wrong ID: {0}".format(neuron))
if not title:
title = "Membrane potential"
pylab.title(title)
pylab.ylabel("Membrane potential (mV)")
if nest.GetStatus(detec)[0]['time_in_steps']:
pylab.xlabel("Steps")
else:
pylab.xlabel("Time (%s)" % timeunit)
pylab.legend(loc="best")
pylab.draw()
return plotids
elif nest.GetStatus(detec, "to_file")[0]:
fname = nest.GetStatus(detec, "filenames")[0]
return from_file(fname, title, grayscale)
else:
raise nest.NESTError("Provided devices neither records to file, \
nor to memory.")
def _from_memory(detec):
"""Get voltage traces from memory.
    Parameters
    ----------
detec : list
Global id of Voltmeter or Multimeter
"""
import array
ev = nest.GetStatus(detec, 'events')[0]
potentials = ev['V_m']
senders = ev['senders']
v = {}
t = {}
if 'times' in ev:
times = ev['times']
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = array.array('f')
v[currentsender].append(float(potentials[s]))
t[currentsender].append(float(times[s]))
else:
# reconstruct the time vector, if not stored explicitly
detec_status = nest.GetStatus(detec)[0]
origin = detec_status['origin']
start = detec_status['start']
interval = detec_status['interval']
senders_uniq = numpy.unique(senders)
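        # Assumes every sender contributed the same number of samples, so the
        # per-sender sample count is total events / number of unique senders.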
        num_intvls = len(senders) // len(senders_uniq)
times_s = origin + start + interval + \
interval * numpy.array(range(num_intvls))
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = times_s
v[currentsender].append(float(potentials[s]))
return t, v
def show():
"""Call pylab.show() to show all figures and enter the GUI main loop.
Python will block until all figure windows are closed again.
You should call this function only once at the end of a script.
See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
"""
pylab.show()
| gpl-2.0 |
Heathckliff/cantera | interfaces/cython/cantera/examples/reactors/piston.py | 3 | 2658 | """
Gas 1: a stoichiometric H2/O2/Ar mixture
Gas 2: a wet CO/O2 mixture
-------------------------------------
| || |
| || |
| gas 1 || gas 2 |
| || |
| || |
-------------------------------------
The two volumes are connected by an adiabatic free piston. The piston speed is
proportional to the pressure difference between the two chambers.
Note that each side uses a *different* reaction mechanism
"""
import sys
import cantera as ct
fmt = '%10.3f %10.1f %10.4f %10.4g %10.4g %10.4g %10.4g'
print('%10s %10s %10s %10s %10s %10s %10s' % ('time [s]','T1 [K]','T2 [K]',
'V1 [m^3]', 'V2 [m^3]',
'V1+V2 [m^3]','X(CO)'))
gas1 = ct.Solution('h2o2.cti')
gas1.TPX = 900.0, ct.one_atm, 'H2:2, O2:1, AR:20'
gas2 = ct.Solution('gri30.xml')
gas2.TPX = 900.0, ct.one_atm, 'CO:2, H2O:0.01, O2:5'
r1 = ct.IdealGasReactor(gas1)
r1.volume = 0.5
r2 = ct.IdealGasReactor(gas2)
r2.volume = 0.1
# The wall is held fixed until t = 0.1 s, then released to allow the pressure to
# equilibrate.
def v(t):
if t < 0.1:
return 0.0
else:
return (r1.thermo.P - r2.thermo.P) * 1e-4
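# The released piston moves at a speed proportional to the pressure difference;
# the 1e-4 factor is the proportionality constant (presumably chosen small enough
# for a smooth approach to pressure equilibrium rather than an abrupt jump).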
w = ct.Wall(r1, r2, velocity=v)
net = ct.ReactorNet([r1, r2])
tim = []
t1 = []
t2 = []
v1 = []
v2 = []
v = []
xco = []
xh2 = []
for n in range(200):
time = (n+1)*0.001
net.advance(time)
if n % 4 == 3:
print(fmt % (time, r1.T, r2.T, r1.volume, r2.volume,
r1.volume + r2.volume, r2.thermo['CO'].X[0]))
tim.append(time * 1000)
t1.append(r1.T)
t2.append(r2.T)
v1.append(r1.volume)
v2.append(r2.volume)
v.append(r1.volume + r2.volume)
xco.append(r2.thermo['CO'].X[0])
xh2.append(r1.thermo['H2'].X[0])
# plot the results if matplotlib is installed.
if '--plot' in sys.argv:
import matplotlib.pyplot as plt
plt.subplot(2,2,1)
plt.plot(tim,t1,'-',tim,t2,'r-')
plt.xlabel('Time (ms)')
plt.ylabel('Temperature (K)')
plt.subplot(2,2,2)
plt.plot(tim,v1,'-',tim,v2,'r-',tim,v,'g-')
plt.xlabel('Time (ms)')
plt.ylabel('Volume (m3)')
plt.subplot(2,2,3)
plt.plot(tim,xco)
plt.xlabel('Time (ms)')
plt.ylabel('CO Mole Fraction (right)')
plt.subplot(2,2,4)
plt.plot(tim,xh2)
plt.xlabel('Time (ms)')
plt.ylabel('H2 Mole Fraction (left)')
plt.tight_layout()
plt.show()
else:
print("""To view a plot of these results, run this script with the option --plot""")
| bsd-3-clause |
ubccr/tacc_stats | analyze/process_pickles/plotkey_paper.py | 1 | 4011 | #!/usr/bin/env python
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import matplotlib
# Set the matplotlib output mode from config if it exists
if not 'matplotlib.pyplot' in sys.modules:
try:
matplotlib.use(analyze_conf.matplotlib_output_mode)
except NameError:
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils
def main():
parser = argparse.ArgumentParser(description='Plot a key pair for some jobs')
parser.add_argument('-t', help='Threshold', metavar='thresh')
parser.add_argument('key1', help='First key', nargs='?',
default='amd64_core')
parser.add_argument('key2', help='Second key', nargs='?',
default='SSE_FLOPS')
parser.add_argument('filearg', help='File, directory, or quoted'
' glob pattern', nargs='?',default='jobs')
parser.add_argument('-f', help='Set full mode', action='store_true')
parser.add_argument('-m', help='Set heatmap mode', action='store_true')
parser.add_argument('--max', help='Use max instead of mean',
action='store_true')
n=parser.parse_args()
filelist=tspl_utils.getfilelist(n.filearg)
if n.max:
func=max
else:
func=scipy.stats.tmean
for file in filelist:
try:
if n.f:
full='_full'
ts=tspl.TSPLBase(file,[n.key1],[n.key2])
else:
full=''
ts=tspl.TSPLSum(file,[n.key1],[n.key2])
except tspl.TSPLException as e:
continue
if not tspl_utils.checkjob(ts,3600,16):
continue
reduction=[] # place to store reductions via func
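    # Each series is treated as a cumulative counter: differencing successive
    # samples and dividing by the time step converts it to a rate before reducing.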
for v in ts:
rate=numpy.divide(numpy.diff(v),numpy.diff(ts.t))
reduction.append(func(rate))
m=func(reduction)
if not n.t or m > float(n.t):
print ts.j.id + ': ' + str(m)
if n.m:
heatmap(ts,n,m,full)
else:
lineplot(ts,n,m,full)
else:
print ts.j.id + ': under threshold, ' + str(m) + ' < ' + n.t
# Plot key pair vs. time in a a traditional y vs. t line plot--one line per host
# (normal) or one line per data stream (full)
def lineplot(ts,n,m,full):
tmid=(ts.t[:-1]+ts.t[1:])/2.0
fig,ax=plt.subplots(1,1,figsize=(8,6),dpi=80)
ax.hold=True
ymin=0. # Wrong in general, but min must be 0. or less
ymax=0.
for v in ts:
rate=numpy.divide(numpy.diff(v),numpy.diff(ts.t))
ymin=min(ymin,min(rate))
ymax=max(ymax,max(rate))
print rate[-4:-1]
    if numpy.all(rate[-4:-1] == [0., 0., 0.]):
continue
ax.plot(tmid/3600,rate,'o-')
ymin,ymax=tspl_utils.expand_range(ymin,ymax,0.1)
ax.set_ylim(bottom=ymin,top=ymax)
# title=ts.title + ', V: %(V)-8.3g' % {'V' : m}
# plt.suptitle(title)
ax.set_xlabel('Time (hr)')
# ax.set_ylabel('Total ' + ts.label(ts.k1[0],ts.k2[0]) + '/s')
ax.set_ylabel('L1 Cache Fill Rate (lines/s)')
fname='_'.join(['graph',ts.j.id,ts.k1[0],ts.k2[0],'vs_t'+full])
fig.savefig(fname)
plt.close()
# Plot a heat map of the data. X-axis time, Y-axis host (normal) or data stream
# (full). Colorbar for data range.
def heatmap(ts,n,m,full):
tmid=(ts.t[:-1]+ts.t[1:])/2.0
fig,ax=plt.subplots(1,1,figsize=(8,6),dpi=80)
ymin=0. # Wrong in general, but min must be 0. or less
ymax=0.
first=True
for v in ts:
rate=numpy.divide(numpy.diff(v),numpy.diff(ts.t))
if first:
r=rate
first=False
else:
r=numpy.vstack((r,rate))
ymin=min(ymin,min(rate))
ymax=max(ymax,max(rate))
ymin,ymax=tspl_utils.expand_range(ymin,ymax,0.1)
l=r.shape[0]
y=numpy.arange(l)
plt.pcolor(tmid/3600,y,r)
plt.colorbar()
plt.clim(ymin,ymax)
title=ts.title + ', V: %(V)-8.3g' % {'V' : m}
plt.suptitle(title)
ax.set_xlabel('Time (hr)')
if n.f:
ax.set_ylabel('Item')
else:
ax.set_ylabel('Host')
fname='_'.join(['graph',ts.j.id,ts.k1[0],ts.k2[0],'heatmap'+full])
fig.savefig(fname)
plt.close()
if __name__ == '__main__':
main()
| lgpl-2.1 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| gpl-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/dataframe/io/demo.py | 4 | 8227 | from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from ..core import tokenize, DataFrame
from .io import from_delayed
from ...delayed import delayed
from ...utils import random_state_data
__all__ = ['make_timeseries']
def make_float(n, rstate):
return rstate.rand(n) * 2 - 1
def make_int(n, rstate):
return rstate.poisson(1000, size=n)
names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank', 'George',
'Hannah', 'Ingrid', 'Jerry', 'Kevin', 'Laura', 'Michael', 'Norbert',
'Oliver', 'Patricia', 'Quinn', 'Ray', 'Sarah', 'Tim', 'Ursula',
'Victor', 'Wendy', 'Xavier', 'Yvonne', 'Zelda']
def make_string(n, rstate):
return rstate.choice(names, size=n)
def make_categorical(n, rstate):
return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n),
names)
make = {float: make_float,
int: make_int,
str: make_string,
object: make_string,
'category': make_categorical}
def make_timeseries_part(start, end, dtypes, freq, state_data):
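    # Build a single partition spanning [start, end]; the final row is dropped when
    # it falls exactly on `end` so adjacent partitions do not duplicate that stamp.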
index = pd.DatetimeIndex(start=start, end=end, freq=freq, name='timestamp')
state = np.random.RandomState(state_data)
columns = dict((k, make[dt](len(index), state)) for k, dt in dtypes.items())
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def make_timeseries(start='2000-01-01',
end='2000-12-31',
dtypes={'name': str, 'id': int, 'x': float, 'y': float},
freq='10s',
partition_freq='1M',
seed=None):
""" Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head() # doctest: +SKIP
id name value
2000-01-01 00:00:00 969 Jerry -0.309014
2000-01-01 02:00:00 1010 Ray -0.760675
2000-01-01 04:00:00 1016 Patricia -0.063261
2000-01-01 06:00:00 960 Charlie 0.788245
2000-01-01 08:00:00 1031 Kevin 0.466002
"""
divisions = list(pd.DatetimeIndex(start=start, end=end,
freq=partition_freq))
state_data = random_state_data(len(divisions) - 1, seed)
name = 'make-timeseries-' + tokenize(start, end, dtypes, freq,
partition_freq, state_data)
dsk = {(name, i): (make_timeseries_part, divisions[i], divisions[i + 1],
dtypes, freq, state_data[i])
for i in range(len(divisions) - 1)}
head = make_timeseries_part('2000', '2000', dtypes, '1H', state_data[0])
return DataFrame(dsk, name, head, divisions)
def generate_day(date, open, high, low, close, volume,
freq=pd.Timedelta(seconds=60), random_state=None):
""" Generate a day of financial data from open/close high/low values """
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
if not isinstance(date, pd.Timestamp):
date = pd.Timestamp(date)
if not isinstance(freq, pd.Timedelta):
freq = pd.Timedelta(freq)
time = pd.date_range(date + pd.Timedelta(hours=9),
date + pd.Timedelta(hours=12 + 4),
freq=freq / 5, name='timestamp')
n = len(time)
while True:
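        # Draw a random walk, scale it to the daily range and pin its endpoints to
        # open/close; values beyond the open/close band are remapped so the extremes
        # reach high and low. Retry (rarely needed) when they fail to do so.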
values = (random_state.random_sample(n) - 0.5).cumsum()
values *= (high - low) / (values.max() - values.min()) # scale
values += np.linspace(open - values[0], close - values[-1],
len(values)) # endpoints
assert np.allclose(open, values[0])
assert np.allclose(close, values[-1])
mx = max(close, open)
mn = min(close, open)
ind = values > mx
values[ind] = (values[ind] - mx) * (high - mx) / (values.max() - mx) + mx
ind = values < mn
values[ind] = (values[ind] - mn) * (low - mn) / (values.min() - mn) + mn
# The process fails if min/max are the same as open close. This is rare
if (np.allclose(values.max(), high) and np.allclose(values.min(), low)):
break
s = pd.Series(values.round(3), index=time)
rs = s.resample(freq)
# TODO: add in volume
return pd.DataFrame({'open': rs.first(),
'close': rs.last(),
'high': rs.max(),
'low': rs.min()})
def daily_stock(symbol, start, stop, freq=pd.Timedelta(seconds=1),
data_source='yahoo', random_state=None):
""" Create artificial stock data
This data matches daily open/high/low/close values from Yahoo! Finance, but
interpolates values within each day with random values. This makes the
results look natural without requiring the downloading of large volumes of
data. This is useful for education and benchmarking.
Parameters
----------
symbol: string
A stock symbol like "GOOG" or "F"
start: date, str, or pd.Timestamp
The start date, input will be fed into pd.Timestamp for normalization
stop: date, str, or pd.Timestamp
        The stop date, input will be fed into pd.Timestamp for normalization
freq: timedelta, str, or pd.Timedelta
The frequency of sampling
data_source: str, optional
defaults to 'yahoo'. See pandas_datareader.data.DataReader for options
random_state: int, np.random.RandomState object
random seed, defaults to randomly chosen
Examples
--------
>>> import dask.dataframe as dd # doctest: +SKIP
>>> df = dd.demo.daily_stock('GOOG', '2010', '2011', freq='1s') # doctest: +SKIP
>>> df # doctest: +SKIP
Dask DataFrame Structure:
close high low open
npartitions=252
2010-01-04 09:00:00 float64 float64 float64 float64
2010-01-05 09:00:00 ... ... ... ...
... ... ... ... ...
2010-12-31 09:00:00 ... ... ... ...
2010-12-31 16:00:00 ... ... ... ...
Dask Name: from-delayed, 504 tasks
>>> df.head() # doctest: +SKIP
close high low open
timestamp
2010-01-04 09:00:00 626.944 626.964 626.944 626.951
2010-01-04 09:00:01 626.906 626.931 626.906 626.931
2010-01-04 09:00:02 626.901 626.911 626.901 626.905
2010-01-04 09:00:03 626.920 626.920 626.905 626.905
2010-01-04 09:00:04 626.894 626.917 626.894 626.906
"""
from pandas_datareader import data
df = data.DataReader(symbol, data_source, start, stop)
seeds = random_state_data(len(df), random_state=random_state)
parts = []
divisions = []
for i, seed in zip(range(len(df)), seeds):
s = df.iloc[i]
if s.isnull().any():
continue
part = delayed(generate_day)(s.name, s.loc['Open'], s.loc['High'], s.loc['Low'],
s.loc['Close'], s.loc['Volume'],
freq=freq, random_state=seed)
parts.append(part)
divisions.append(s.name + pd.Timedelta(hours=9))
divisions.append(s.name + pd.Timedelta(hours=12 + 4))
meta = generate_day('2000-01-01', 1, 2, 0, 1, 100)
return from_delayed(parts, meta=meta, divisions=divisions)
| gpl-3.0 |
herilalaina/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
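# Minimal usage sketch (hypothetical indicator arrays, not taken from the docs):
# two identical sets of two biclusters over a 2x2 matrix give a perfect score.
#
#     a = ([[True, False], [False, True]], [[True, False], [False, True]])
#     consensus_score(a, a)  # -> 1.0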
| bsd-3-clause |
chandlercr/aima-python | submissions/Ban/myNN.py | 13 | 3099 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Ban import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
alumni = DataFrame()
alumni.target = []
alumni.data = []
'''
Extract data from the CORGIS county demographics data set,
which is organized by county and state.
'''
def alumniTarget(record):
    # Label a county 1 when most residents hold a bachelor's degree or higher.
    if record['Education']["Bachelor's Degree or Higher"] > 50:
        return 1
    return 0
demographics = county_demographics.get_all_counties()
for student in demographics:
try:
        alumni.target.append(alumniTarget(student))
college = float(student['Education']["High School or Higher"])
poverty = float(student['Income']["Persons Below Poverty Level"])
ethnicity = float(student['Ethnicities']["White Alone"])
alumni.data.append([college, poverty, ethnicity])
except:
traceback.print_exc()
alumni.feature_names = [
"High School or Higher",
"Persons Below Poverty Level",
"White Alone",
]
alumni.target_names = [
    'Most did not graduate college',
    'Most did graduate college',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
# hidden_layer_sizes = (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
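# setupScales records the per-column min and max over the data grid; scaleGrid then
# applies min-max normalization so every feature is mapped into [0, 1].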
alumniScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(alumni.data)
alumniScaled.data = scaleGrid(alumni.data)
alumniScaled.feature_names = alumni.feature_names
alumniScaled.target = alumni.target
alumniScaled.target_names = alumni.target_names
Examples = {
'AlumniDefault': {
'Poor with degree': alumni,
},
'AlumniSGD': {
'Poor with degree': alumni,
'mlpc': mlpc
},
'AlumniScaled': {
'frame': alumniScaled,
},
} | mit |
ryfeus/lambda-packs | Pandas_numpy/source/pandas/core/reshape/concat.py | 2 | 21484 | """
concat routines
"""
import numpy as np
from pandas import compat, DataFrame, Series, Index, MultiIndex
from pandas.core.index import (_get_objs_combined_axis,
_ensure_index, _get_consensus_names,
_all_indexes_same)
from pandas.core.categorical import (_factorize_from_iterable,
_factorize_from_iterables)
from pandas.core.internals import concatenate_block_managers
from pandas.core import common as com
from pandas.core.generic import NDFrame
import pandas.core.dtypes.concat as _concat
# ---------------------------------------------------------------------
# Concatenate DataFrame objects
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis(es)
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic
ignore_index : boolean, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys
names : list, default None
Names for the levels in the resulting hierarchical index
verify_integrity : boolean, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation
copy : boolean, default True
If False, do not copy data unnecessarily
Returns
-------
concatenated : object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
See Also
--------
Series.append
DataFrame.append
DataFrame.join
DataFrame.merge
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2',])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3])
animal letter number
0 NaN a 1
1 NaN b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy)
return op.get_result()
class _Concatenator(object):
"""
Orchestrates a concatenation operation for BlockManagers
"""
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
ignore_index=False, verify_integrity=False, copy=True):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
'"{name}"'.format(name=type(objs).__name__))
if join == 'outer':
self.intersect = False
elif join == 'inner':
self.intersect = True
else: # pragma: no cover
raise ValueError('Only can inner (intersect) or outer (union) '
'join the other axis')
if isinstance(objs, dict):
if keys is None:
keys = sorted(objs)
objs = [objs[k] for k in keys]
else:
objs = list(objs)
if len(objs) == 0:
raise ValueError('No objects to concatenate')
if keys is None:
objs = list(com._not_none(*objs))
else:
# #1649
clean_keys = []
clean_objs = []
for k, v in zip(keys, objs):
if v is None:
continue
clean_keys.append(k)
clean_objs.append(v)
objs = clean_objs
name = getattr(keys, 'name', None)
keys = Index(clean_keys, name=name)
if len(objs) == 0:
raise ValueError('All objects passed were None')
# consolidate data & figure out what our result ndim is going to be
ndims = set()
for obj in objs:
if not isinstance(obj, NDFrame):
msg = ('cannot concatenate object of type "{0}";'
' only pd.Series, pd.DataFrame, and pd.Panel'
' (deprecated) objs are valid'.format(type(obj)))
raise TypeError(msg)
# consolidate
obj._consolidate(inplace=True)
ndims.add(obj.ndim)
# get the sample
        # want the highest ndim that we have, and must be non-empty
# unless all objs are empty
sample = None
if len(ndims) > 1:
max_ndim = max(ndims)
for obj in objs:
if obj.ndim == max_ndim and np.sum(obj.shape):
sample = obj
break
else:
            # filter out the empties if we have no multi-index possibilities
            # note: keep empty Series, as they affect the result columns / name
non_empties = [obj for obj in objs
if sum(obj.shape) > 0 or isinstance(obj, Series)]
if (len(non_empties) and (keys is None and names is None and
levels is None and
join_axes is None and
not self.intersect)):
objs = non_empties
sample = objs[0]
if sample is None:
sample = objs[0]
self.objs = objs
# Standardize axis parameter to int
if isinstance(sample, Series):
axis = DataFrame()._get_axis_number(axis)
else:
axis = sample._get_axis_number(axis)
# Need to flip BlockManager axis in the DataFrame special case
self._is_frame = isinstance(sample, DataFrame)
if self._is_frame:
axis = 1 if axis == 0 else 0
self._is_series = isinstance(sample, Series)
if not 0 <= axis <= sample.ndim:
raise AssertionError("axis must be between 0 and {ndim}, input was"
" {axis}".format(ndim=sample.ndim, axis=axis))
# if we have mixed ndims, then convert to highest ndim
# creating column numbers as needed
if len(ndims) > 1:
current_column = 0
max_ndim = sample.ndim
self.objs, objs = [], self.objs
for obj in objs:
ndim = obj.ndim
if ndim == max_ndim:
pass
elif ndim != max_ndim - 1:
raise ValueError("cannot concatenate unaligned mixed "
"dimensional NDFrame objects")
else:
name = getattr(obj, 'name', None)
if ignore_index or name is None:
name = current_column
current_column += 1
# doing a row-wise concatenation so need everything
# to line up
if self._is_frame and axis == 1:
name = 0
obj = sample._constructor({name: obj})
self.objs.append(obj)
# note: this is the BlockManager axis (since DataFrame is transposed)
self.axis = axis
self.join_axes = join_axes
self.keys = keys
self.names = names or getattr(keys, 'names', None)
self.levels = levels
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
self.copy = copy
self.new_axes = self._get_new_axes()
def get_result(self):
# series only
if self._is_series:
# stack blocks
if self.axis == 0:
name = com._consensus_name_attr(self.objs)
mgr = self.objs[0]._data.concat([x._data for x in self.objs],
self.new_axes)
cons = _concat._get_series_result_type(mgr, self.objs)
return cons(mgr, name=name).__finalize__(self, method='concat')
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
cons = _concat._get_series_result_type(data)
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method='concat')
# combine block managers
else:
mgrs_indexers = []
for obj in self.objs:
mgr = obj._data
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
if ax == self.axis:
# Suppress reindexing on concat axis
continue
obj_labels = mgr.axes[ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.axis,
copy=self.copy)
if not self.copy:
new_data._consolidate_inplace()
cons = _concat._get_frame_result_type(new_data, self.objs)
return (cons._from_axes(new_data, self.new_axes)
.__finalize__(self, method='concat'))
def _get_result_dim(self):
if self._is_series and self.axis == 1:
return 2
else:
return self.objs[0].ndim
def _get_new_axes(self):
ndim = self._get_result_dim()
new_axes = [None] * ndim
if self.join_axes is None:
for i in range(ndim):
if i == self.axis:
continue
new_axes[i] = self._get_comb_axis(i)
else:
if len(self.join_axes) != ndim - 1:
raise AssertionError("length of join_axes must not be equal "
"to {length}".format(length=ndim - 1))
# ufff...
indices = compat.lrange(ndim)
indices.remove(self.axis)
for i, ax in zip(indices, self.join_axes):
new_axes[i] = ax
new_axes[self.axis] = self._get_concat_axis()
return new_axes
def _get_comb_axis(self, i):
data_axis = self.objs[0]._get_block_manager_axis(i)
try:
return _get_objs_combined_axis(self.objs, axis=data_axis,
intersect=self.intersect)
except IndexError:
types = [type(x).__name__ for x in self.objs]
raise TypeError("Cannot concatenate list of {types}"
.format(types=types))
def _get_concat_axis(self):
"""
Return index to be used along concatenation axis.
"""
if self._is_series:
if self.axis == 0:
indexes = [x.index for x in self.objs]
elif self.ignore_index:
idx = com._default_index(len(self.objs))
return idx
elif self.keys is None:
names = [None] * len(self.objs)
num = 0
has_names = False
for i, x in enumerate(self.objs):
if not isinstance(x, Series):
raise TypeError("Cannot concatenate type 'Series' "
"with object of type {type!r}"
.format(type=type(x).__name__))
if x.name is not None:
names[i] = x.name
has_names = True
else:
names[i] = num
num += 1
if has_names:
return Index(names)
else:
return com._default_index(len(self.objs))
else:
return _ensure_index(self.keys)
else:
indexes = [x._data.axes[self.axis] for x in self.objs]
if self.ignore_index:
idx = com._default_index(sum(len(i) for i in indexes))
return idx
if self.keys is None:
concat_axis = _concat_indexes(indexes)
else:
concat_axis = _make_concat_multiindex(indexes, self.keys,
self.levels, self.names)
self._maybe_check_integrity(concat_axis)
return concat_axis
def _maybe_check_integrity(self, concat_index):
if self.verify_integrity:
if not concat_index.is_unique:
overlap = concat_index.get_duplicates()
raise ValueError('Indexes have overlapping values: '
'{overlap!s}'.format(overlap=overlap))
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _make_concat_multiindex(indexes, keys, levels=None, names=None):
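    # Build the MultiIndex for the concatenation axis: the outer level(s) come from
    # `keys`/`levels`/`names`, the inner levels from the concatenated objects' indexes.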
if ((levels is None and isinstance(keys[0], tuple)) or
(levels is not None and len(levels) > 1)):
zipped = compat.lzip(*keys)
if names is None:
names = [None] * len(zipped)
if levels is None:
_, levels = _factorize_from_iterables(zipped)
else:
levels = [_ensure_index(x) for x in levels]
else:
zipped = [keys]
if names is None:
names = [None]
if levels is None:
levels = [_ensure_index(keys)]
else:
levels = [_ensure_index(x) for x in levels]
if not _all_indexes_same(indexes):
label_list = []
# things are potentially different sizes, so compute the exact labels
# for each level and pass those to MultiIndex.from_arrays
for hlevel, level in zip(zipped, levels):
to_concat = []
for key, index in zip(hlevel, indexes):
try:
i = level.get_loc(key)
except KeyError:
raise ValueError('Key {key!s} not in level {level!s}'
.format(key=key, level=level))
to_concat.append(np.repeat(i, len(index)))
label_list.append(np.concatenate(to_concat))
concat_index = _concat_indexes(indexes)
# these go at the end
if isinstance(concat_index, MultiIndex):
levels.extend(concat_index.levels)
label_list.extend(concat_index.labels)
else:
codes, categories = _factorize_from_iterable(concat_index)
levels.append(categories)
label_list.append(codes)
if len(names) == len(levels):
names = list(names)
else:
# make sure that all of the passed indices have the same nlevels
if not len(set([idx.nlevels for idx in indexes])) == 1:
raise AssertionError("Cannot concat indices that do"
" not have the same number of levels")
# also copies
names = names + _get_consensus_names(indexes)
return MultiIndex(levels=levels, labels=label_list, names=names,
verify_integrity=False)
new_index = indexes[0]
n = len(new_index)
kpieces = len(indexes)
# also copies
new_names = list(names)
new_levels = list(levels)
# construct labels
new_labels = []
# do something a bit more speedy
for hlevel, level in zip(zipped, levels):
hlevel = _ensure_index(hlevel)
mapped = level.get_indexer(hlevel)
mask = mapped == -1
if mask.any():
raise ValueError('Values not found in passed level: {hlevel!s}'
.format(hlevel=hlevel[mask]))
new_labels.append(np.repeat(mapped, n))
if isinstance(new_index, MultiIndex):
new_levels.extend(new_index.levels)
new_labels.extend([np.tile(lab, kpieces) for lab in new_index.labels])
else:
new_levels.append(new_index)
new_labels.append(np.tile(np.arange(n), kpieces))
if len(new_names) < len(new_levels):
new_names.extend(new_index.names)
return MultiIndex(levels=new_levels, labels=new_labels, names=new_names,
verify_integrity=False)
| mit |
vortex-ape/scikit-learn | sklearn/externals/joblib/_dask.py | 9 | 8791 | from __future__ import print_function, division, absolute_import
import contextlib
from uuid import uuid4
import weakref
from .parallel import AutoBatchingMixin, ParallelBackendBase, BatchedCalls
from .parallel import parallel_backend
try:
import distributed
except ImportError:
distributed = None
if distributed is not None:
from distributed.client import Client, _wait
from distributed.utils import funcname, itemgetter
from distributed import get_client, secede, rejoin
from distributed.worker import thread_state
from distributed.sizeof import sizeof
from tornado import gen
def is_weakrefable(obj):
try:
weakref.ref(obj)
return True
except TypeError:
return False
class _WeakKeyDictionary:
"""A variant of weakref.WeakKeyDictionary for unhashable objects.
    This data structure is used to store futures for broadcasted data objects
    such as large numpy arrays or pandas dataframes that are not hashable and
    therefore cannot be used as keys of traditional python dicts.
    Furthermore, using a dict with id(array) as key is not safe because
    Python is likely to reuse the id of recently collected arrays.
"""
def __init__(self):
self._data = {}
def __getitem__(self, obj):
ref, val = self._data[id(obj)]
if ref() is not obj:
# In case of a race condition with on_destroy.
raise KeyError(obj)
return val
def __setitem__(self, obj, value):
key = id(obj)
try:
ref, _ = self._data[key]
if ref() is not obj:
# In case of race condition with on_destroy.
raise KeyError(obj)
except KeyError:
# Insert the new entry in the mapping along with a weakref
# callback to automatically delete the entry from the mapping
# as soon as the object used as key is garbage collected.
def on_destroy(_):
del self._data[key]
ref = weakref.ref(obj, on_destroy)
self._data[key] = ref, value
def __len__(self):
return len(self._data)
def clear(self):
self._data.clear()
def _funcname(x):
try:
if isinstance(x, BatchedCalls):
x = x.items[0][0]
except Exception:
pass
return funcname(x)
class Batch(object):
def __init__(self, tasks):
self.tasks = tasks
def __call__(self, *data):
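        # `data` holds the realized values of the scattered futures; itemgetter
        # placeholders created in _to_func_args are resolved against it here.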
results = []
with parallel_backend('dask'):
for func, args, kwargs in self.tasks:
args = [a(data) if isinstance(a, itemgetter) else a
for a in args]
kwargs = {k: v(data) if isinstance(v, itemgetter) else v
for (k, v) in kwargs.items()}
results.append(func(*args, **kwargs))
return results
def __reduce__(self):
return Batch, (self.tasks,)
class DaskDistributedBackend(ParallelBackendBase, AutoBatchingMixin):
MIN_IDEAL_BATCH_DURATION = 0.2
MAX_IDEAL_BATCH_DURATION = 1.0
def __init__(self, scheduler_host=None, scatter=None,
client=None, loop=None, **submit_kwargs):
if client is None:
if scheduler_host:
client = Client(scheduler_host, loop=loop,
set_as_default=False)
else:
try:
client = get_client()
except ValueError:
msg = ("To use Joblib with Dask first create a Dask Client"
"\n\n"
" from dask.distributed import Client\n"
" client = Client()\n"
"or\n"
" client = Client('scheduler-address:8786')")
raise ValueError(msg)
self.client = client
if scatter is not None and not isinstance(scatter, (list, tuple)):
raise TypeError("scatter must be a list/tuple, got "
"`%s`" % type(scatter).__name__)
if scatter is not None and len(scatter) > 0:
# Keep a reference to the scattered data to keep the ids the same
self._scatter = list(scatter)
scattered = self.client.scatter(scatter, broadcast=True)
self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
else:
self._scatter = []
self.data_futures = {}
self.task_futures = set()
self.submit_kwargs = submit_kwargs
def __reduce__(self):
return (DaskDistributedBackend, ())
def get_nested_backend(self):
return DaskDistributedBackend(client=self.client)
def configure(self, n_jobs=1, parallel=None, **backend_args):
return self.effective_n_jobs(n_jobs)
def start_call(self):
self.call_data_futures = _WeakKeyDictionary()
def stop_call(self):
# The explicit call to clear is required to break a cycling reference
# to the futures.
self.call_data_futures.clear()
def effective_n_jobs(self, n_jobs):
return sum(self.client.ncores().values())
def _to_func_args(self, func):
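        # Replace large or pre-scattered arguments of the batched calls with
        # itemgetter placeholders that point into the list of collected futures;
        # the futures are returned separately and passed positionally to Batch.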
collected_futures = []
itemgetters = dict()
# Futures that are dynamically generated during a single call to
# Parallel.__call__.
call_data_futures = getattr(self, 'call_data_futures', None)
def maybe_to_futures(args):
for arg in args:
arg_id = id(arg)
if arg_id in itemgetters:
yield itemgetters[arg_id]
continue
f = self.data_futures.get(arg_id, None)
if f is None and call_data_futures is not None:
try:
f = call_data_futures[arg]
except KeyError:
if is_weakrefable(arg) and sizeof(arg) > 1e3:
# Automatically scatter large objects to some of
# the workers to avoid duplicated data transfers.
# Rely on automated inter-worker data stealing if
# more workers need to reuse this data
# concurrently.
[f] = self.client.scatter([arg])
call_data_futures[arg] = f
if f is not None:
getter = itemgetter(len(collected_futures))
collected_futures.append(f)
itemgetters[arg_id] = getter
arg = getter
yield arg
tasks = []
for f, args, kwargs in func.items:
args = list(maybe_to_futures(args))
kwargs = dict(zip(kwargs.keys(),
maybe_to_futures(kwargs.values())))
tasks.append((f, args, kwargs))
if not collected_futures:
return func, ()
return (Batch(tasks), collected_futures)
def apply_async(self, func, callback=None):
key = '%s-batch-%s' % (_funcname(func), uuid4().hex)
func, args = self._to_func_args(func)
future = self.client.submit(func, *args, key=key, **self.submit_kwargs)
self.task_futures.add(future)
@gen.coroutine
def callback_wrapper():
result = yield _wait([future])
self.task_futures.remove(future)
if callback is not None:
callback(result) # gets called in separate thread
self.client.loop.add_callback(callback_wrapper)
ref = weakref.ref(future) # avoid reference cycle
def get():
return ref().result()
future.get = get # monkey patch to achieve AsyncResult API
return future
def abort_everything(self, ensure_ready=True):
""" Tell the client to cancel any task submitted via this instance
joblib.Parallel will never access those results
"""
self.client.cancel(self.task_futures)
self.task_futures.clear()
@contextlib.contextmanager
def retrieval_context(self):
"""Override ParallelBackendBase.retrieval_context to avoid deadlocks.
This removes thread from the worker's thread pool (using 'secede').
Seceding avoids deadlock in nested parallelism settings.
"""
# See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
# this is used.
if hasattr(thread_state, 'execution_state'):
# we are in a worker. Secede to avoid deadlock.
secede()
yield
if hasattr(thread_state, 'execution_state'):
rejoin()
| bsd-3-clause |
weissj3/MWTools | Plotting/RA_Distance_Belokurov_Compairison_With_Globulars.py | 1 | 8666 | #!/usr/bin/python
import sys
sys.path.insert(0, '../Newby-tools/utilities')
sys.path.insert(0, '../Scripts')
import numpy as np
import math as ma
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import astro_coordinates as ac
from matplotlib.ticker import FuncFormatter, MaxNLocator
import scipy.stats as st
import scipy as sc
from copy import deepcopy
import MSTODensity as MSTO
import PlottingUtilities as PU
Primary = int(sys.argv[2])
def format_fn(tick_val, tick_pos):
return PU.mag_dist(tick_val)
SgrMu = [[[], []], [[], []], [[], []], [[], []]]
SgrR = [[[], []], [[], []], [[], []], [[], []]]
SgrT = [[[], []], [[], []], [[], []], [[], []]]
SgrP = [[[], []], [[], []], [[], []], [[], []]]
SgrW = [[[], []], [[], []], [[], []], [[], []]]
f = open(sys.argv[1], 'r')
count = 0
wedge = 9
for line in f:
if count == 0:
count +=1
wedge +=1
continue
if count == 5:
count = 0
continue
Bad = 0
if line[0] == '*':
Bad = 1
line = line[1:]
ln = line.split()
    if 1 <= count <= 3 or (count == 4 and len(ln) > 0):
        SgrMu[count-1][Bad].append(float(ln[1]))
        SgrR[count-1][Bad].append(float(ln[2]))
        SgrT[count-1][Bad].append(float(ln[3]))
        SgrP[count-1][Bad].append(float(ln[4]))
        SgrW[count-1][Bad].append(wedge)
    count += 1
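# Note added for clarity (inferred from the parser above, not from separate
# documentation): the input file appears to repeat five-line blocks per wedge --
# a header line (count == 0), then up to four stream-fit lines whose columns
# 1-4 hold mu, R, theta and phi.  Lines flagged with a leading '*' are kept but
# stored in the "bad" slot (index 1) of each [good, bad] pair.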
wedge = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
BelRA = [215, 210, 205, 200, 195, 190, 185, 180, 175, 170, 165]
BelMag = [21.45, 21.4, 21.35, 21.2, 21.1, 20.9, 20.8, 20.65, 20.4, 20.25, 20.2]
BelCRA = [190, 185, 180]
BelCMag = [21.4, 21.35, 21.45]
clusters = PU.checkGlobularClusters()
SgrRA = [[[], []], [[], []], [[], []], [[], []]]
SgrDec = [[[], []], [[], []], [[], []], [[], []]]
SgrMag = [[[], []], [[], []], [[], []], [[], []]]
SgrArrowRA = [[[], []], [[], []], [[], []], [[], []]]
SgrArrowDec = [[[], []], [[], []], [[], []], [[], []]]
SgrArrowMag = [[[], []], [[], []], [[], []], [[], []]]
SgrNorm = [[[], []], [[], []], [[], []], [[], []]]
for k in range(len(SgrMu)):
for j in range(len(SgrMu[k])):
for i in range(len(SgrMu[k][j])):
tmpra, tmpdec = ac.GCToEq(SgrMu[k][j][i], 0.0, SgrW[k][j][i])
SgrRA[k][j].append(tmpra)
SgrDec[k][j].append(tmpdec)
SgrMag[k][j].append(PU.dist_mag(SgrR[k][j][i]))
tmpra, tmpdec, tmpr = ac.streamToEqR(0.0,0.0,1.0,SgrMu[k][j][i], SgrR[k][j][i], SgrT[k][j][i]*ac.deg, SgrP[k][j][i]*ac.deg, SgrW[k][j][i])
SgrArrowRA[k][j].append(tmpra[0] - SgrRA[k][j][i])
SgrArrowDec[k][j].append(tmpdec - SgrDec[k][j][i])
SgrArrowMag[k][j].append(PU.dist_mag(tmpr) - SgrMag[k][j][i])
SgrNorm[k][j].append(ma.sqrt(SgrArrowRA[k][j][i]**2. + SgrArrowDec[k][j][i]**2. + SgrArrowMag[k][j][i]**2.))
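# Note added for clarity: the nested loops above appear to convert each fitted
# stream centre from great-circle (mu, nu=0) coordinates in its stripe to
# equatorial RA/Dec (ac.GCToEq), convert the fitted distance R into an i-band
# magnitude (PU.dist_mag), and build a unit direction vector along the fitted
# stream axis (ac.streamToEqR) so it can be drawn as an arrow in the RA/mag and
# RA/Dec planes below.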
plt.figure(1)
for i in range(len(SgrRA)):
plt.plot(SgrRA[i][0], SgrMag[i][0], "bo", label="MW@home")
# plt.plot(SgrRA[i][1], SgrMag[i][1], "ro", label="Bad MW@home")
#plt.plot(SgrRA[1], SgrMag[1], "ro", label="MW@home Bad")
#plt.plot(BifRA[1], BifMag[1], "ro", label="MW@home Bad")
#plt.plot(VirRA[1], VirMag[1], "ro", label="MW@home Bad")
plt.plot(clusters[0], clusters[2], "gd", ms=10, label="Clusters")
#plt.ylim(19,22)
for k in range(len(SgrRA)):
for j in range(len(SgrRA[k])):
for i in range(len(SgrRA[k][j])):
plt.text(SgrRA[k][j][i], SgrMag[k][j][i], str(SgrW[k][j][i])+"."+str(k+1))
for k in range(len(SgrRA)):
for j in range(len(SgrRA[k])):
for i in range(len(SgrRA[k][j])):
plt.arrow(SgrRA[k][j][i], SgrMag[k][j][i], SgrArrowRA[k][j][i]/SgrNorm[k][j][i], SgrArrowMag[k][j][i]/SgrNorm[k][j][i], length_includes_head=True, head_width=.1, head_length=.4)
plt.plot(BelRA, BelMag, "r*", label="Bel Sgr")
plt.plot(BelCRA, BelCMag, "g*", label="Bel C")
plt.legend(loc=4)
plt.xlim(260, 120)
plt.xlabel("RA", fontsize=18)
plt.ylabel("i Magnitude", fontsize=18)
plt.xticks(fontsize=16) # fontsize of the tick labels
plt.yticks(fontsize=16)
ax2 = plt.gca().twinx()
ax2.set_ylabel('Distance (kpc)', fontsize=18)
ax2.set_ylim(14, 24)
ax2.yaxis.set_major_locator(MaxNLocator(24))
ax2.yaxis.set_major_formatter(FuncFormatter(format_fn))
ax2.yaxis.set_tick_params(labelsize=16)
plt.figure(2)
for k in range(len(SgrRA)):
plt.plot(SgrRA[k][0], SgrDec[k][0], "bo", ms=5.0)
plt.plot(SgrRA[k][1], SgrDec[k][1], "ro", ms=10.0)
for k in range(len(SgrRA)):
for j in range(len(SgrRA[k])):
for i in range(len(SgrRA[k][j])):
plt.arrow(SgrRA[k][j][i], SgrDec[k][j][i], SgrArrowRA[k][j][i]/SgrNorm[k][j][i], SgrArrowDec[k][j][i]/SgrNorm[k][j][i], color="white", length_includes_head=False, head_width=.2, head_length=1.0)
for k in range(len(SgrRA)):
for j in range(len(SgrRA[k])):
for i in range(len(SgrRA[k][j])):
plt.text(SgrRA[k][j][i], SgrDec[k][j][i], str(SgrW[k][j][i])+"."+str(k+1), color="white")
stars = [ [], [], [] ]
Simstars = [ [], [], [] ]
if Primary:
high = 15.0
low = 10.0
# stars[0], stars[1], stars[2] = readStarFile_RA("/home/weissj3/Desktop/MWTools/Scripts/PrimaryStars2.txt")
# stars[0], stars[1], stars[2] = readStarFile_RA("/home/weissj3/Desktop/MWTools/Scripts/PrimaryStarsAllSky4Stream.txt")
stars[0], stars[1], stars[2] = PU.readStarFile_RA("/home/weissj3/Desktop/MWTools/Scripts/PrimaryStars_Redone.txt")
temp = np.where((np.array(stars[2]) < high) & (np.array(stars[2]) > low))# and (stars[2] > 24.0))
stars[0] = np.array(stars[0])[temp[0]]
stars[1] = np.array(stars[1])[temp[0]]
stars[2] = np.array(stars[2])[temp[0]]
Simstars[0], Simstars[1], Simstars[2] = PU.readStarFile_RA("/home/weissj3/Desktop/MWTools/Scripts/PrimaryStarsAllSkySim4StreamBG.txt")
temp = np.where((np.array(Simstars[2]) < high) & (np.array(Simstars[2]) > low))# and (stars[2] > 24.0))
Simstars[0] = np.array(Simstars[0])[temp[0]]
Simstars[1] = np.array(Simstars[1])[temp[0]]
Simstars[2] = np.array(Simstars[2])[temp[0]]
else:
for i in range(10, 24):
temp1, temp2, temp3 = PU.readStarFile_lb("/home/weissj3/Desktop/Newby-tools2/milkyway-tools/milkyway_simulated_north/AllSkyRebuildStars-%d.txt" % i)
stars[0] = stars[0] + temp1
stars[1] = stars[1] + temp2
stars[2] = stars[2] + temp3
#stars = PU.removeDuplicates(stars)
print len(stars[0]), len(stars[1])
#PU.plotKernelDensity(stars[0], stars[1], extent=[[120, 260], [-5, 35]], bins=[280j, 80j])
#PU.plotKernelDensityDifference(stars[0], stars[1], Simstars[0], Simstars[1], extent=[[120, 260], [-5, 35]], bins=[280j, 80j])
BGHistogram = PU.flipUD(np.array(MSTO.CreateObservedBackground([int(low), int(high)], "../Scripts/TestSave2_65kpc_BackConvolved.data")))
#BGHistogram = PU.flipUD(np.array(MSTO.CreateObservedBackground([int(low), int(high)], "TestSave3_65kpcConvolvedALast.data")))
#BGHistogram = PU.flipUD(np.array(MSTO.CreateObservedBackground([int(low), int(high)], "TestSave3_65kpc.data")))
starHistogram = PU.flipUD(np.histogram2d(stars[0], stars[1], bins=[280, 80], range=[[120, 260], [-5, 35]])[0].transpose())
stars = np.ma.masked_array(BGHistogram, PU.CreateMask(starHistogram))
#stars2 = np.ma.masked_array(BGHistogram2, PU.CreateMask(starHistogram))
plt.imshow(starHistogram - stars, extent=[120, 260, -5, 35], cmap="binary")
#plt.hist2d(stars[0], stars[1], bins=[280, 80], range=[[120, 260], [-5, 35]], cmap="binary")
#plt.plot(stars[0], stars[1], 'o', ms=0.1) #, bins=[240, 80], range=[[120, 260], [-5, 35]], cmap="binary", vmax=180)
plt.colorbar()
plt.plot(clusters[0], clusters[1], "gd", ms=10, label="Clusters")
plt.xlabel("RA", fontsize=18)
plt.ylabel("Dec", fontsize=18)
plt.xticks(fontsize=16) # fontsize of the tick labels
plt.yticks(fontsize=16) # fontsize of the tick labels
plt.xlim(251, 127)
plt.title("%d kpc to %d kpc background" % (low, high))
plt.show()
| mit |
svalenti/lcogtsnpipe | trunk/bin/calibratemag.py | 1 | 18852 | #!/usr/bin/env python
description = ">> make catalogue from table"
import os
from argparse import ArgumentParser
import lsc
import numpy.ma as np
from numpy import pi, newaxis
import astropy.units as u
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.io import ascii
import matplotlib.pyplot as plt
from datetime import datetime
import sys
def crossmatch(cat0, cat1, threshold=1., racol0='ra', deccol0='dec', racol1='ra', deccol1='dec', right_join=False):
dra = cat0[racol0] - cat1[racol1][:, newaxis]
ddec = cat0[deccol0] - cat1[deccol1][:, newaxis]
sep = np.sqrt(dra**2 + ddec**2) * 3600.
matches = np.min(sep, axis=1) < threshold
inds = np.argmin(sep, axis=1)
out = Table(cat0[inds], masked=True)
if right_join:
for col in out.colnames:
out[col].mask = ~matches
cat1 = cat1.copy()
else:
out = out[matches]
cat1 = cat1[matches]
return out, cat1
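# Illustrative sketch added for clarity (not part of the original pipeline):
# a minimal call to crossmatch() on made-up coordinates, using the same column
# names and the default 1-arcsec threshold as above.
def _crossmatch_example():
    cat0 = Table({'ra': [150.0001, 150.1, 150.2],
                  'dec': [2.0001, 2.1, 2.2],
                  'mag': [18.0, 19.0, 20.0]})
    cat1 = Table({'ra': [150.0, 150.2], 'dec': [2.0, 2.2]})
    # with right_join=True the output has one (possibly masked) row of cat0
    # per row of cat1, so the two tables line up row by row
    matched, cat1_out = crossmatch(cat0, cat1, threshold=1., right_join=True)
    return matched, cat1_out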
def get_image_data(lista, magcol=None, errcol=None, refcat=None):
filename_equals = ['filename="{}"'.format(os.path.basename(fn).replace('.sn2.fits', '.fits')) for fn in lista]
t = Table(lsc.mysqldef.query(['''select filter, filepath, filename, airmass, shortname, dayobs, instrument,
zcol1, z1, c1, dz1, dc1, zcol2, z2, c2, dz2, dc2, psfmag, psfdmag, apmag, dapmag
from photlco left join telescopes on photlco.telescopeid=telescopes.id where ''' +
' or '.join(filename_equals)], lsc.conn), masked=True)
t['filter'] = [lsc.sites.filterst1[filt] for filt in t['filter']]
if magcol in t.colnames and errcol in t.colnames:
t.rename_column(magcol, 'instmag')
t.rename_column(errcol, 'dinstmag')
elif magcol is not None and errcol is not None:
print 'Cross-matching {} catalogs. This may take a while...'.format(len(lista))
catalogs = []
badrows = []
for i, image in enumerate(t):
sn2file = image['filepath']+image['filename'].replace('.fits', '.sn2.fits')
if os.path.isfile(sn2file):
cat = Table.read(sn2file)
if refcat is None:
refcat = cat
racol1 = 'ra0'
deccol1 = 'dec0'
else:
racol1 = 'ra'
deccol1 = 'dec'
cat_match, _ = crossmatch(cat, refcat, racol0='ra0', deccol0='dec0',
racol1=racol1, deccol1=deccol1, right_join=True)
catalogs.append(cat_match)
else:
badrows.append(i)
t.remove_rows(badrows)
t['ra'] = np.array([cat['ra'] for cat in catalogs])
t['dec'] = np.array([cat['dec'] for cat in catalogs])
t['instmag'] = np.array([cat[magcol] for cat in catalogs])
t['dinstmag'] = np.array([cat[errcol] for cat in catalogs])
for col in t.colnames:
if t[col].dtype.kind == 'f':
t[col].mask |= t[col] >= 9999.
return t
def average_in_flux(mag, dmag, axis=None):
flux = 10**(mag / -2.5)
dflux = np.log(10) / 2.5 * flux * dmag
avg_dflux = np.power(np.sum(np.power(dflux, -2), axis), -0.5)
avg_flux = np.sum(flux * np.power(dflux, -2), axis) * avg_dflux**2
avg_mag = -2.5 * np.log10(avg_flux)
avg_dmag = 2.5 / np.log(10) * np.divide(avg_dflux, avg_flux)
return avg_mag, avg_dmag
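# Quick illustrative check of average_in_flux (added for clarity; the numbers
# are made up): averaging two equal measurements of 20.0 +/- 0.1 mag should
# return ~20.0 with the uncertainty reduced by ~sqrt(2) to ~0.071 mag, because
# the weighting is done in flux space with inverse-variance weights.
def _average_in_flux_example():
    mags = np.array([20.0, 20.0])    # np is numpy.ma in this module
    dmags = np.array([0.1, 0.1])
    return average_in_flux(mags, dmags)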
def combine_nights(combined_catalog, filterlist, refcat):
header = ['BEGIN CATALOG HEADER',
'nfields 13',
' ra 1 0 d degrees %10.6f',
' dec 2 0 d degrees %10.6f',
' id 3 0 c INDEF %3d']
for filt in filterlist:
header.append(' {} {:2d} 0 r INDEF %6.3f'.format(filt, len(header) - 1))
header.append(' {}err {:2d} 0 r INDEF %6.3f'.format(filt, len(header) - 1))
header += ['END CATALOG HEADER', '']
catalog = Table([refcat['ra'], refcat['dec'], refcat['id']], meta={'comments': header}, masked=True)
for filt in filterlist:
mags = combined_catalog['mag'][combined_catalog['filter'] == filt]
median = np.median(mags, axis=0)
absdev_mag = mags - median
mad = np.median(np.abs(absdev_mag), axis=0) * np.sqrt(pi / 2)
mags.mask |= np.abs(absdev_mag) > 5 * mad
catalog[filt] = np.median(mags, axis=0)
catalog[filt+'err'] = np.median(np.abs(mags - catalog[filt]), axis=0) * np.sqrt(pi / 2)
return catalog
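# Note added for clarity: combine_nights() clips each star's per-image
# magnitudes at 5 times a robust scatter estimate before taking the median.
# The sqrt(pi/2) factor is the Gaussian conversion from a mean absolute
# deviation to a standard deviation (E|X - mu| = sigma * sqrt(2/pi)); here it
# is applied to median absolute deviations, so the resulting "err" columns are
# robust scatter estimates rather than standard errors of the mean.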
if __name__ == "__main__":
parser = ArgumentParser(description=description)
parser.add_argument("imglist", help="file containing a list of images on which to run")
parser.add_argument("-i", "--interactive", action="store_true")
parser.add_argument("-F", "--force", action="store_true", help="overwrite existing catalogs")
parser.add_argument("-e", "--exzp", help='filename for external zero point from different field')
parser.add_argument("-s", "--stage", default='abscat', choices=['mag', 'abscat', 'local'],
help='calibrate the local sequence (abscat) or the supernova (mag)?')
parser.add_argument("-t", "--typemag", default='fit', choices=['fit', 'ph'],
help='PSF photometry (fit) or aperture photometry (ph)?')
parser.add_argument("-f", "--field", choices=['landolt', 'sloan', 'apass'],
help='Landolt (UBVRI), Sloan (ugriz), or APASS (BVgri) filters?')
parser.add_argument("-c", "--catalog", help="use only stars that match this reference catalog")
parser.add_argument("--minstars", default=0, type=int, help="minimum number of catalog matches for inclusion")
parser.add_argument("-o", "--output", default='{SN}_{field}_{datenow}.cat', help='output filename')
args = parser.parse_args()
with open(args.imglist) as f:
lista = f.read().splitlines()
if not lista:
sys.exit('calibratemag.py: ' + args.imglist + ' is empty')
if args.stage in ['abscat', 'local'] and args.catalog is not None:
try:
refcat = Table.read(args.catalog, format='ascii', fill_values=[('9999.000', '0')])
colnames = [row.split()[0] for row in refcat.meta['comments'] if len(row.split()) == 6]
for old, new in zip(refcat.colnames, colnames):
refcat.rename_column(old, new)
except ascii.core.InconsistentTableError: # real Landolt catalogs are different
refcat = Table.read(args.catalog, format='ascii', names=['id', 'ra', 'dec', 'U', 'B', 'V', 'R', 'I',
'vary', 'Uerr', 'Berr', 'Verr', 'Rerr', 'Ierr', 'col13', 'col14', 'col15', 'col16', 'col17'],
fill_values=[('99.999', '0'), ('9.9999', '0')])
if args.typemag == 'fit' and args.stage == 'mag': # PSF photometry for supernova
targets = get_image_data(lista, 'psfmag', 'psfdmag')
elif args.typemag == 'ph' and args.stage == 'mag': # aperture photometry for supernova
targets = get_image_data(lista, 'apmag', 'dapmag')
elif args.typemag == 'fit': # PSF photometry for local sequence
targets = get_image_data(lista, 'smagf', 'smagerrf', refcat)
elif args.typemag == 'ph': # aperture photometry for local sequence
targets = get_image_data(lista, 'magp3', 'merrp3', refcat)
color_to_use = lsc.sites.chosecolor(targets['filter'], True)
colors_to_calculate = set(sum(color_to_use.values(), []))
# copy average zero points & color terms from the standards to the science images
if args.exzp:
with open(args.exzp) as f:
lista2 = f.read().splitlines()
standards = get_image_data(lista2)
standards = standards.group_by(['dayobs', 'shortname', 'instrument', 'filter', 'zcol1', 'zcol2'])
targets[['zcol1', 'z1', 'dz1', 'c1', 'dc1', 'zcol2', 'z2', 'dz2', 'c2', 'dc2']].mask = True
for group in standards.groups:
matches_in_targets = ((targets['dayobs'] == group['dayobs'][0]) & (targets['shortname'] == group['shortname'][0])
& (targets['instrument'] == group['instrument'][0]) & (targets['filter'] == group['filter'][0]))
if not np.any(matches_in_targets):
continue
targets['zcol1'][matches_in_targets] = group['zcol1'][0]
targets['zcol2'][matches_in_targets] = group['zcol2'][0]
targets['z1'][matches_in_targets], targets['dz1'][matches_in_targets] = average_in_flux(group['z1'], group['dz1'])
targets['z2'][matches_in_targets], targets['dz2'][matches_in_targets] = average_in_flux(group['z2'], group['dz2'])
if np.all(group['dc1']):
dc1 = np.sum(group['dc1']**-2)**-0.5
targets['c1'][matches_in_targets] = np.sum(group['c1'] * group['dc1']**-2) * dc1**2
targets['dc1'][matches_in_targets] = dc1
else:
targets['c1'][matches_in_targets] = np.mean(group['c1'])
                targets['dc1'][matches_in_targets] = 0.
if np.all(group['dc2']):
dc2 = np.sum(group['dc2']**-2)**-0.5
targets['c2'][matches_in_targets] = np.sum(group['c2'] * group['dc2']**-2) * dc2**2
targets['dc2'][matches_in_targets] = dc2
else:
targets['c2'][matches_in_targets] = np.mean(group['c2'])
                targets['dc2'][matches_in_targets] = 0.
# generate average colors for each night at each site
targets['site'] = [row['shortname'].split()[0].lower() if row['shortname'] is not None else None for row in targets]
extinction = [lsc.sites.extinction[row['site']][row['filter']] for row in targets]
targets['instmag_amcorr'] = (targets['instmag'].T - extinction * targets['airmass']).T
targets = targets.group_by(['dayobs', 'shortname', 'instrument'])
for filters in colors_to_calculate:
colors, dcolors = [], []
for group in targets.groups:
f0 = group['filter'] == filters[0]
f1 = group['filter'] == filters[1]
m0, dm0 = average_in_flux(group['instmag_amcorr'][f0], group['dinstmag'][f0], axis=0)
m1, dm1 = average_in_flux(group['instmag_amcorr'][f1], group['dinstmag'][f1], axis=0)
z0, dz0 = average_in_flux(group['z1'][f0], group['dz1'][f0])
z1, dz1 = average_in_flux(group['z2'][f1], group['dz2'][f1])
if np.all(group['dc1'][f0]):
dc0 = np.sum(np.power(group['dc1'][f0], -2))**-0.5
c0 = np.sum(group['c1'][f0] * np.power(group['dc1'][f0], -2)) * dc0**2
else:
dc0 = 0.
c0 = np.mean(group['c1'][f0])
if np.all(group['dc2'][f1]):
dc1 = np.sum(np.power(group['dc2'][f1], -2))**-0.5
c1 = np.sum(group['c2'][f1] * np.power(group['dc2'][f1], -2)) * dc1**2
else:
dc1 = 0.
c1 = np.mean(group['c2'][f1])
color = np.divide(m0 - m1 + z0 - z1, 1 - c0 + c1)
dcolor = np.abs(color) * np.sqrt(
np.divide(dm0**2 + dm1**2 + dz0**2 + dz1**2, (m0 - m1 + z0 - z1)**2)
+ np.divide(dc0**2 + dc1**2, (1 - c0 + c1)**2)
)
for row in group:
colors.append(color)
dcolors.append(dcolor)
targets[filters] = np.array(colors)
targets['d'+filters] = np.array(dcolors)
# calibrate all the instrumental magnitudes
zcol = [color_to_use[row['filter']][0] if color_to_use[row['filter']] else row['filter']*2 for row in targets]
zeropoint = np.choose(zcol == targets['zcol1'], [targets['z2'], targets['z1']])
dzeropoint = np.choose(zcol == targets['zcol1'], [targets['dz2'], targets['dz1']])
colorterm = np.choose(zcol == targets['zcol1'], [targets['c2'], targets['c1']])
dcolorterm = np.choose(zcol == targets['zcol1'], [targets['dc2'], targets['dc1']])
uzcol, izcol = np.unique(zcol, return_inverse=True)
color_used = np.choose(izcol, [targets[col].T if col in targets.colnames else 0. for col in uzcol]).filled(0.) # if no other filter, skip color correction
dcolor_used = np.choose(izcol, [targets['d'+col].T if col in targets.colnames else 0. for col in uzcol]).filled(0.)
targets['mag'] = (targets['instmag_amcorr'].T + zeropoint + colorterm * color_used).T
targets['dmag'] = np.sqrt(targets['dinstmag'].T**2 + dzeropoint**2 + dcolorterm**2 * color_used**2 + colorterm**2 * dcolor_used**2).T
if args.stage == 'mag':
# write mag & dmag to database
targets['dmag'].mask = targets['mag'].mask
query = 'INSERT INTO photlco (filename, mag, dmag) VALUES\n'
query += ',\n'.join(['("{}", {}, {})'.format(row['filename'], row['mag'], row['dmag']) for row in targets.filled(9999.)])
query += '\nON DUPLICATE KEY UPDATE mag=VALUES(mag), dmag=VALUES(dmag)'
print query
lsc.mysqldef.query([query], lsc.conn)
elif args.stage == 'abscat':
# write all the catalogs to files & put filename in database
for row in targets:
good = ~row['mag'].mask
if not np.any(good):
print 'no good magnitudes for', row['filename']
lsc.mysqldef.updatevalue('photlco', 'abscat', 'X', row['filename'])
continue
outtab = Table([row['ra'][good].T, row['dec'][good].T, row['mag'][good].T, row['dmag'][good].T],
meta={'comments': ['daophot+standardfield', ' ra dec {0} d{0}'.format(row['filter'])]})
outtab['col2'].format = '%6.3f'
outtab['col3'].format = '%5.3f'
outfile = row['filename'].replace('.fits', '.cat')
try:
outtab.write(row['filepath'] + outfile, format='ascii.fixed_width_no_header', delimiter='',
overwrite=args.force, fill_values=[(ascii.masked, '9999.0')])
lsc.mysqldef.updatevalue('photlco', 'abscat', outfile, row['filename'])
except IOError as e:
print e, '-- use -F to overwrite'
elif args.stage == 'local':
if args.field == 'landolt':
filterlist = ['U', 'B', 'V', 'R', 'I']
elif args.field == 'sloan':
filterlist = ['u', 'g', 'r', 'i', 'z']
elif args.field == 'apass':
filterlist = ['B', 'V', 'g', 'r', 'i']
else:
raise Exception('Need to give --field (landolt, sloan, apass) for -s local')
# make master catalog and write to file
catalog = combine_nights(targets, filterlist, refcat)
catfile = os.path.basename(args.catalog)
if args.interactive:
plt.ion()
fig = plt.figure(1, figsize=(11, 8.5))
for filt in filterlist:
nightly_by_filter = targets[(targets['filter'] == filt) & (np.sum(~targets['mag'].mask, axis=1) > args.minstars)]
if not nightly_by_filter:
print 'no calibrated stars in', filt
continue
fig.clear()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.set_title('Filter: ' + filt)
lgd_handle = ax1.plot(nightly_by_filter['mag'].data.T, color='k', alpha=0.5, marker='_', ls='none')[:1]
lgd_handle.append(ax1.errorbar(range(len(catalog)), catalog[filt], yerr=catalog[filt+'err'], marker='o', ls='none'))
lgd_label = ['individual images', 'output (median of images)']
if filt in refcat.colnames:
lgd_handle += ax1.plot(refcat[filt], marker='o', mfc='none', ls='none', zorder=10)
lgd_label.append(catfile)
ax1.invert_yaxis()
ax1.set_xlabel('Star ID in {} (line number)'.format(catfile))
ax1.set_ylabel('Apparent Magnitude')
ax1.legend(lgd_handle, lgd_label, loc='best')
nightly_by_filter['offset'] = nightly_by_filter['mag'] - catalog[filt].T
if filt in refcat.colnames:
nightly_by_filter['offset from refcat'] = nightly_by_filter['mag'] - refcat[filt].T
ax2.axhline(0., color='k', lw=1)
lgd_handle = ax2.plot(nightly_by_filter['offset'], color='k', alpha=0.5, marker='_', ls='none')[:1]
lgd_handle += ax2.plot(np.median(nightly_by_filter['offset'], axis=1), marker='o', ls='none')
lgd_label = ['individual stars', 'median offset']
if filt in refcat.colnames:
lgd_handle += ax2.plot(np.median(nightly_by_filter['offset from refcat'], axis=1), marker='o', mfc='none', ls='none')
lgd_label.append('offset from ' + catfile)
ax2.set_xticks(range(len(nightly_by_filter)))
ax2.set_xticklabels(nightly_by_filter['filename'], rotation='vertical', size='xx-small')
ax2.set_ylabel('Offset from Median (mag)')
ax2.legend(lgd_handle, lgd_label, loc='best')
fig.set_tight_layout(True)
fig.subplots_adjust(bottom=0.28, hspace=0.33)
fig.canvas.draw_idle()
if filt in refcat.colnames:
plt.figure(2)
plt.clf()
ax3 = plt.subplot(111)
ax3.axhline(0., color='k', lw=1)
diffs = (catalog[filt] - refcat[filt]).data
median_diff = np.median(diffs)
ax3.plot(catalog[filt], diffs, label='individual stars', marker='o', ls='none')
ax3.axhline(median_diff, label='median: {:.3f} mag'.format(median_diff), ls='--', lw=1)
ax3.set_title('Filter: ' + filt)
ax3.set_xlabel('Apparent Magnitude in Output Catalog')
ax3.set_ylabel('Output - {} (mag)'.format(catfile))
ax3.legend(loc='best')
plt.draw()
raw_input('Press enter to continue.')
snname = os.path.basename(catfile).split('_')[0] if args.catalog else 'catalog'
filename = args.output.format(SN=snname, field=args.field,
datenow=datetime.now().strftime('%Y%m%d_%H_%M'))
catalog['ra'].format = '%10.6f'
catalog['dec'].format = '%10.6f'
for col in catalog.colnames[3:]:
catalog[col].format = '%6.3f'
catalog.write(filename, format='ascii.fixed_width_no_header', delimiter='',
fill_values=[(ascii.masked, '9999.0')])
print 'catalog written to', filename
| mit |
MockyJoke/numbers | ex6/code/ab_analysis.py | 1 | 2539 |
# coding: utf-8
import sys
import pandas as pd
import numpy as np
import difflib
import gzip
from scipy import stats
def main():
OUTPUT_TEMPLATE = (
'"Did more/less users use the search feature?" p-value: {more_users_p:.3g}\n'
'"Did users search more/less?" p-value: {more_searches_p:.3g}\n'
'"Did more/less instructors use the search feature?" p-value: {more_instr_p:.3g}\n'
'"Did instructors search more/less?" p-value: {more_instr_searches_p:.3g}'
)
# searchdata_file = sys.argv[1]
# ...
filename = sys.argv[1]
# filename = "searches.json"
searches = pd.read_json(filename,orient='records', lines=True)
even_samples = searches[searches['uid'] % 2 == 0]
odd_samples = searches[searches['uid'] % 2 != 0]
even_searched = even_samples[even_samples['search_count'] != 0].shape[0]
even_unsearched = even_samples[even_samples['search_count'] == 0].shape[0]
odd_searched = odd_samples[odd_samples['search_count'] != 0].shape[0]
odd_unsearched = odd_samples[odd_samples['search_count'] == 0].shape[0]
p_more_searches = stats.mannwhitneyu(even_samples['search_count'],odd_samples['search_count']).pvalue
contingency = [[even_searched, even_unsearched], [odd_searched, odd_unsearched]]
chi2, p_more_users, dof, expected = stats.chi2_contingency(contingency)
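    # Note added for clarity: the uid parity appears to encode the A/B split
    # (one parity saw the new search box, the other the old one).  The 2x2
    # table above counts [searched, did not search] per group, so the
    # chi-squared test asks whether the *fraction* of users who searched
    # differs, while the Mann-Whitney U test above asks whether the *number*
    # of searches per user differs.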
inst_samples = searches[searches['is_instructor']]
inst_even_samples = inst_samples[inst_samples['uid'] % 2 == 0]
inst_odd_samples = inst_samples[inst_samples['uid'] % 2 != 0]
p_more_instr_searches = stats.mannwhitneyu(inst_even_samples['search_count'],inst_odd_samples['search_count']).pvalue
inst_even_searched = inst_even_samples[inst_even_samples['search_count'] != 0].shape[0]
inst_even_unsearched = inst_even_samples[inst_even_samples['search_count'] == 0].shape[0]
inst_odd_searched = inst_odd_samples[inst_odd_samples['search_count'] != 0].shape[0]
inst_odd_unsearched = inst_odd_samples[inst_odd_samples['search_count'] == 0].shape[0]
inst_contingency = [[inst_even_searched, inst_even_unsearched], [inst_odd_searched, inst_odd_unsearched]]
inst_chi2, p_more_instr, inst_dof, inst_expected = stats.chi2_contingency(inst_contingency)
# Output
print(OUTPUT_TEMPLATE.format(
more_users_p=p_more_users,
more_searches_p=p_more_searches,
more_instr_p=p_more_instr,
more_instr_searches_p=p_more_instr_searches,
))
if __name__ == '__main__':
main()
| mit |
abimannans/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
Vimos/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 124 | 1877 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
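# The three helpers above trace the first-quadrant boundary of each unit ball:
# l1 gives |w0| + |w1| = 1, l2 gives w0**2 + w1**2 = 1, and el appears to solve
# the corresponding elastic-net constraint for w1 as a function of w0 and the
# mixing parameter z.  A small illustrative sanity check (added, not part of
# the original example): every curve should pass through (1, 0) and (0, 1).
assert abs(l1([0.0])[0] - 1.0) < 1e-9 and abs(l1([1.0])[0]) < 1e-9
assert abs(l2([0.0])[0] - 1.0) < 1e-9 and abs(l2([1.0])[0]) < 1e-9
assert abs(el([0.0], 0.501)[0] - 1.0) < 1e-9 and abs(el([1.0], 0.501)[0]) < 1e-9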
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # avoid exactly 0.5, which would make el() divide by zero
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
jlegendary/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
This module implements estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm was introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal).
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
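def _fast_mcd_toy_example():
    # Illustrative sketch (not part of the original scikit-learn module): run
    # fast_mcd on a small Gaussian cloud contaminated by a few gross outliers.
    # The sizes and offsets are arbitrary toy values.
    rng = np.random.RandomState(0)
    X = rng.randn(100, 2)
    X[:10] += 10.  # contaminate the first ten samples
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    # the contaminated points are expected to be excluded from the support
    return location, covariance, support[:10].sum()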
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
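        # Keep only the observations whose squared Mahalanobis distance lies
        # below the 97.5% quantile of a chi-squared distribution with
        # n_features degrees of freedom; the rest are treated as outliers.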
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
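# Illustrative usage sketch, assuming the enclosing class is scikit-learn's
# MinCovDet; the synthetic data and contamination sizes below are arbitrary
# demonstration choices, not part of the upstream module.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(42)
    # 200 inliers from a correlated Gaussian plus 20 gross outliers.
    inliers = rng.multivariate_normal([0., 0.], [[1., .6], [.6, 1.]], size=200)
    outliers = rng.uniform(low=-10., high=10., size=(20, 2))
    X = np.vstack([inliers, outliers])

    mcd = MinCovDet(random_state=0).fit(X)
    print("robust location: %s" % mcd.location_)
    print("robust covariance:\n%s" % mcd.covariance_)
    print("observations kept in the support: %d" % mcd.support_.sum())
    # The injected outliers show up as the largest Mahalanobis distances.
    print("largest squared Mahalanobis distance: %.2f" % mcd.dist_.max())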
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))

plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')

##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
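# Note: in later scikit-learn releases the cross_validation module used above
# was deprecated in favour of sklearn.model_selection. A rough equivalent
# (a sketch, not verified against this version of the library) would be:
#
#     from sklearn.model_selection import KFold, cross_val_score
#     this_scores = cross_val_score(lasso, X, y, n_jobs=1)
#     k_fold = KFold(n_splits=3)
#     for k, (train, test) in enumerate(k_fold.split(X)):
#         ...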
| bsd-3-clause |