repo_name | path | copies | size | content | license
---|---|---|---|---|---|
CalvinNeo/PyGeo | countpca_segmentation_2.py | 1 | 7423 | #coding:utf8
import time
import numpy as np, scipy
import pylab as pl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
from matplotlib import cm
from matplotlib import mlab
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from itertools import *
import collections
from multiprocessing import Pool
import random
from scipy.optimize import leastsq
from adasurf import AdaSurfConfig, adasurf, paint_surfs, identifysurf, point_normalize, Surface
ELAPSE_SEG = 0
class SurfSegConfig:
def __init__(self):
self.slice_count = 4
self.origin_points = 5
self.most_combination_points = 20
self.same_threshold = 0.1 # smaller values make the check that two surfaces are identical stricter, so more distinct surfaces are generated
self.pointsame_threshold = 1.0
self.filter_rate = 0.08
self.filter_count = 50
self.ori_adarate = 2.0
self.step_adarate = 1.0
self.max_adarate = 2.0
self.split_by_count = True
self.weak_abort = 45
def paint_points(points, show = True, title = '', xlim = None, ylim = None, zlim = None):
fig = pl.figure()
ax = fig.add_subplot(111, projection='3d')
if xlim == None:
xlim = (np.min(points[:, 0]), np.max(points[:, 0]))
if ylim == None:
ylim = (np.min(points[:, 1]), np.max(points[:, 1]))
if zlim == None:
zlim = (np.min(points[:, 2]), np.max(points[:, 2]))
x1 = points[:, 0]
y1 = points[:, 1]
z1 = points[:, 2]
ax.scatter(x1, y1, z1, c='r')
ax.set_zlim(zlim[0], zlim[1])
ax.set_ylim(ylim[0], ylim[1])
ax.set_xlim(xlim[0], xlim[1])
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
pl.title(title)
if show:
pl.show()
return fig
def surf_segmentation(points, config, paint_when_end = False):
global ELAPSE_SEG
config.slice_count = min(int(len(points) / config.origin_points), config.slice_count)
assert len(points) / config.slice_count >= config.origin_points
adasurconfig = AdaSurfConfig({'origin_points': config.origin_points
, 'most_combination_points': config.most_combination_points
, 'same_threshold': config.same_threshold
, 'filter_rate': config.filter_rate
, 'ori_adarate': config.ori_adarate
, 'step_adarate': config.step_adarate
, 'max_adarate': config.max_adarate
, 'pointsame_threshold': config.pointsame_threshold
, 'filter_count' : config.filter_count
, 'weak_abort' : config.weak_abort
})
surfs = []
slice_fig = []
npoints = point_normalize(points)
starttime = time.clock()
xlim = (np.min(npoints[:, 0]), np.max(npoints[:, 0]))
ylim = (np.min(npoints[:, 1]), np.max(npoints[:, 1]))
zlim = (np.min(npoints[:, 2]), np.max(npoints[:, 2]))
pca_md = mlab.PCA(np.copy(npoints))
projection0_direction = None
# projection0_direction = pca_md.Y[0]
# projection0 = np.inner(projection0_direction, npoints)
projection0 = npoints[:, 0]
if config.split_by_count:
step_count = len(projection0) / config.slice_count
pointsets = [np.array([]).reshape(0,3)] * config.slice_count
sorted_projection0_index = np.argsort(projection0)
current_slot_count, ptsetid = 0, 0
for index in sorted_projection0_index:
pointsets[ptsetid] = np.vstack((pointsets[ptsetid], npoints[index, :]))
current_slot_count += 1
if current_slot_count > step_count:
current_slot_count = 0
ptsetid += 1
else:
projection0min, projection0max = np.min(projection0), np.max(projection0)
step_len = (projection0max - projection0min) / config.slice_count
pointsets = [np.array([]).reshape(0,3)] * config.slice_count
for i in xrange(len(projection0)):
if projection0[i] == projection0max:
ptsetid = config.slice_count - 1
else:
ptsetid = int((projection0[i] - projection0min) / step_len)
pointsets[ptsetid] = np.vstack((pointsets[ptsetid], npoints[i]))
# random.shuffle(pointsets)
partial_surfs, fail = [], np.array([]).reshape(0,3)
# for (ptset, ptsetindex) in zip(pointsets, range(len(pointsets))):
# print "slice", len(ptset), xlim, ylim, zlim
# paint_points(ptset, xlim = xlim, ylim = ylim, zlim = zlim)
for (ptset, ptsetindex) in zip(pointsets, range(len(pointsets))):
print "--------------------------------------"
print "before segment", ptsetindex, '/', len(pointsets)
print 'derived surfs:'
# print '---000', ptset.shape, np.array(fail).shape, np.array(fail), fail
if fail is None:
allptfortest = np.array(ptset)
else:
allptfortest = np.vstack((ptset, np.array(fail).reshape(-1,3)))
print "len of surf is: ", len(partial_surfs), ", len of points is: ", len(allptfortest)
if allptfortest is not None and len(allptfortest) > 0:
partial_surfs, _, fail, extradata = identifysurf(allptfortest, adasurconfig, donorm = False, surfs = partial_surfs, title = str(ptsetindex)
, paint_when_end = paint_when_end, current_direction = projection0_direction)
if paint_when_end:
slice_fig.append(extradata[0])
if fail is None:
print "after segment", ptsetindex, "len of surf", len(partial_surfs), "fail is None", fail
else:
print "after segment", ptsetindex, "len of surf", len(partial_surfs), "len of fail", len(fail)
for x in partial_surfs:
x.printf()
surfs.extend(partial_surfs)
# fig = pl.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(npoints[:, 0], npoints[:, 1], npoints[:, 2], c='r')
# x = np.linspace(0, pca_md.Wt[0, 0] * 100, 300)
# y = np.linspace(0, pca_md.Wt[0, 1] * 100, 300)
# z = np.linspace(0, pca_md.Wt[0, 2] * 100, 300)
# ax.plot(x, y, z, c='k')
# x = np.linspace(0, pca_md.Wt[1, 0] * 100, 300)
# y = np.linspace(0, pca_md.Wt[1, 1] * 100, 300)
# z = np.linspace(0, pca_md.Wt[1, 2] * 100, 300)
# ax.plot(x, y, z, c='g')
# pl.show()
return surfs, npoints, (slice_fig, )
if __name__ == '__main__':
c = np.loadtxt('5.py', comments='#')
config = SurfSegConfig()
print 'config', config.__dict__
import time
starttime = time.clock()
surfs, npoints, extradata = surf_segmentation(c, config, paint_when_end = True)
print "----------BELOW ARE SURFACES---------- count:", len(surfs)
print 'TOTAL: ', time.clock() - starttime
print 'ELAPSE_SEG: ', ELAPSE_SEG
ALL_POINT = 0
for s,i in zip(surfs, range(len(surfs))):
print "SURFACE ", i
print s.args # surface args
print s.residuals # MSE
print len(s.points)
ALL_POINT += len(s.points)
# print s[2] # npoints
print '**************************************'
print 'ALL_POINT: ', ALL_POINT
print '----------BELOW ARE POINTS----------'
# for s,i in zip(surfs, range(len(surfs))):
# print "SURFACE ", i
# print s.points
paint_surfs(surfs, npoints, 'all')
print extradata
for slice_fig in extradata[0]:
slice_fig.show()
| apache-2.0 |
fmacias64/deap | examples/es/cma_plotting.py | 12 | 4326 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools
import matplotlib.pyplot as plt
# Problem size
N = 10
NGEN = 125
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
def main(verbose=True):
# The cma module uses the numpy random number generator
numpy.random.seed(64)
# The CMA-ES algorithm takes a population of one individual as argument
# The centroid is set to a vector of 5.0 see http://www.lri.fr/~hansen/cmaes_inmatlab.html
# for more details about the rastrigin and other tests for CMA-ES
strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
halloffame = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
# Objects that will compile the data
sigma = numpy.ndarray((NGEN,1))
axis_ratio = numpy.ndarray((NGEN,1))
diagD = numpy.ndarray((NGEN,N))
fbest = numpy.ndarray((NGEN,1))
best = numpy.ndarray((NGEN,N))
std = numpy.ndarray((NGEN,N))
for gen in range(NGEN):
# Generate a new population
population = toolbox.generate()
# Evaluate the individuals
fitnesses = toolbox.map(toolbox.evaluate, population)
for ind, fit in zip(population, fitnesses):
ind.fitness.values = fit
# Update the strategy with the evaluated individuals
toolbox.update(population)
# Update the hall of fame and the statistics with the
# currently evaluated population
halloffame.update(population)
record = stats.compile(population)
logbook.record(evals=len(population), gen=gen, **record)
if verbose:
print(logbook.stream)
# Save more data along the evolution for latter plotting
# diagD is sorted and sqrooted in the update method
sigma[gen] = strategy.sigma
axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
diagD[gen, :N] = strategy.diagD**2
fbest[gen] = halloffame[0].fitness.values
best[gen, :N] = halloffame[0]
std[gen, :N] = numpy.std(population, axis=0)
# The x-axis will be the number of evaluations
x = list(range(0, strategy.lambda_ * NGEN, strategy.lambda_))
avg, max_, min_ = logbook.select("avg", "max", "min")
plt.figure()
plt.subplot(2, 2, 1)
plt.semilogy(x, avg, "--b")
plt.semilogy(x, max_, "--b")
plt.semilogy(x, min_, "-b")
plt.semilogy(x, fbest, "-c")
plt.semilogy(x, sigma, "-g")
plt.semilogy(x, axis_ratio, "-r")
plt.grid(True)
plt.title("blue: f-values, green: sigma, red: axis ratio")
plt.subplot(2, 2, 2)
plt.plot(x, best)
plt.grid(True)
plt.title("Object Variables")
plt.subplot(2, 2, 3)
plt.semilogy(x, diagD)
plt.grid(True)
plt.title("Scaling (All Main Axes)")
plt.subplot(2, 2, 4)
plt.semilogy(x, std)
plt.grid(True)
plt.title("Standard Deviations in All Coordinates")
plt.show()
if __name__ == "__main__":
main(False)
| lgpl-3.0 |
asnorkin/parapapapam | ensemble/_ensemble.py | 1 | 10880 | import numpy as np
from sklearn.model_selection import cross_val_predict, cross_val_score, StratifiedKFold
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.svm import SVC
from ..metrics import METRICS
class Blender:
def make_greedy_blend(self, X, y, models, scoring=None, cv=3,
proba=True, random_state=42, verbose=False):
"""
This func builds a greedy blend of many models.
Attributes
----------
X : array-like
The data to fit.
y : array-like
The target variable to try to predict.
models : list
List of models to blend.
scoring : string or callable, optional
Scoring function from sklearn.
cv : int, cross validation generator, optional, default: 3
Cross validation from sklearn or number of folds.
proba : bool, optional, default: True
If True, probabilities are predicted; otherwise class labels.
random_state : int, optional, default: 42
Returns
-------
blend : BlendClassifier
The greedily built blending classifier.
best_scores : np.ndarray
Cross-validated score after each accepted model.
"""
try:
metric = METRICS[scoring]
except KeyError:
metrics = [metric for metric in METRICS]
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(metrics)))
if isinstance(cv, int):
cv = StratifiedKFold(n_splits=cv, shuffle=True, random_state=random_state)
scores, preds = self._evaluate_models(X, y, models, metric, cv)
models, scores = np.array(models), np.array(scores)
isorted = np.argsort(scores)[::-1]
blend = BlendClassifier((models[isorted][0],), (1,))
best_scores, best_pred = np.array([scores[isorted][0]]), preds[isorted][0]
if verbose:
print('First blending model:\n{}\nScore: {}'
.format(models[isorted][0], scores[isorted][0]))
for model, pred in zip(models[isorted][1:], preds[isorted][1:]):
score, alpha = self._blend_two_preds(y, best_pred, pred, metric, cv)
if alpha != 1 and score > best_scores[-1]:
blend = blend.get_updated_classifier((model, ), (1 - alpha, ))
best_scores = np.append(best_scores, score)
best_pred = alpha * best_pred + (1 - alpha) * pred
if verbose:
print('The model added to blending:\n{}\nCoef: {}\nNew score: {}'
.format(model, 1 - alpha, score))
elif verbose:
print('The model is not added to blending:\n{}'
.format(model))
return blend, best_scores
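# Hedged usage sketch (added for illustration, not part of the original module).
# It assumes binary labels, that 'roc_auc' is a valid key of the project's
# METRICS dict, and plain scikit-learn estimators; treat it as a sketch rather
# than the canonical API of this package.
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.ensemble import RandomForestClassifier
#
#   blender = Blender()
#   models = [LogisticRegression(), RandomForestClassifier(n_estimators=100)]
#   blend, scores = blender.make_greedy_blend(X, y, models,
#                                             scoring='roc_auc', cv=5,
#                                             verbose=True)
#   blend.fit(X, y)                       # BlendClassifier supports fit/predict_proba
#   probabilities = blend.predict_proba(X)[:, 1]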
def blend_two_models(self, X, y, est1, est2, scoring, cv,
proba=True, random_state=42):
"""
This func blends two estimators as the following combination:
alpha * est1_prediction + (1 - alpha) * est2_prediction
and finds the best combination.
Attributes
----------
X : array-like
The data to fit.
y : array-like
The target variable to try to predict.
est1 : estimator
The first estimator to blend.
est2 : estimator
The second estimator to blend.
scoring : string or callable, optional
Scoring function from sklearn.
cv : int, cross validation generator, optional, default: 3
Cross validation from sklearn or number of folds.
proba : bool, optional, default: True
If True, probabilities are predicted; otherwise class labels.
random_state : int, optional, default: 42
Returns
-------
best_score : float
The best score of blending.
best_alpha : float
The alpha parameter of best blending combination.
"""
try:
metric = METRICS[scoring]
except KeyError:
metrics = [metric for metric in METRICS]
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(metrics)))
weights = np.linspace(0, 1, 101)
method = 'predict_proba' if proba else 'predict'
if isinstance(cv, int):
cv = StratifiedKFold(n_splits=cv, random_state=random_state)
preds1 = cross_val_predict(est1, X, y, cv=cv, method=method)
preds2 = cross_val_predict(est2, X, y, cv=cv, method=method)
if proba:
preds1, preds2 = preds1[:, 1], preds2[:, 1]
best_score, best_alpha = metric(y, preds1), 1
for idx, alpha in enumerate(weights):
preds = alpha * preds1 + (1 - alpha) * preds2
score = metric(y, preds)
if score > best_score:
best_score = score
best_alpha = alpha
return best_score, best_alpha
def _blend_two_preds(self, y, pred1, pred2, metric, cv):
weights = np.linspace(0, 1, 101)
best_score, best_alpha = metric(y, pred1), 1
for idx, alpha in enumerate(weights):
preds = alpha * pred1 + (1 - alpha) * pred2
score = metric(y, preds)
if score > best_score:
best_score = score
best_alpha = alpha
return best_score, best_alpha
def _evaluate_models(self, X, y, models, metric, cv, proba=True):
scores = []
preds = []
method = 'predict_proba' if proba else 'predict'
for model in models:
preds.append(cross_val_predict(model, X, y, cv=cv, method=method))
scores.append(metric(y, preds[-1]))
return scores, preds
def _get_n_best_estimators_from_each_class(self, task_manager, n, classes):
if classes is None:
classes = task_manager.get_done_model_classes()
models = []
for cls in classes:
models.extend(task_manager.get_best_models(cls, n))
return list(reversed(sorted(models, key=lambda x: x[0])))
def _get_models_with_scores(self, models, scores):
return np.array(list(reversed(sorted(zip(scores, models)))))
class BlendClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, estimators=(SVC(),), coefs=(1,)):
"""
Estimator which result is blending of many models.
Parameters
----------
estimators : tuple of classifiers, optional, default: (SVC(),)
The tuple of classifiers to blend.
coefs : tuple of numbers (float, int), optional, default: (1,)
The tuple of coefficients for classifiers blending.
"""
self._check_params(estimators, coefs)
self.estimators = estimators
self.coefs = coefs
def __repr__(self):
output = ''
for idx, (est, coef) in enumerate(zip(self.estimators, self.coefs)):
output += 'step {}. {} : {}\n'.format(idx + 1, est, coef)
return output
@property
def n_estimators(self):
return len(self.estimators)
def fit(self, X, y):
for est in self.estimators:
est.fit(X, y)
return self
def predict(self, X):
result = np.zeros(len(X))
for coef, est in zip(self.coefs, self.estimators):
result += coef * est.predict(X)
return result
def predict_proba(self, X):
result = np.zeros((len(X), 2))
for coef, est in zip(self.coefs, self.estimators):
result += coef * est.predict_proba(X)
return result
def get_updated_classifier(self, new_estimators, new_coefs):
"""
This func makes updated blending classifier
and returns new blending classifier.
Parameters
----------
new_estimators : tuple of estimators
The tuple of new models to blend.
new_coefs : tuple of numbers (float, int)
The tuple of coefficients of new models
If the sum of the existing coefficients equals 1, new models are added
while preserving that property, i.e. models are added one by one and
the coefs are updated by the following algorithm:
- we have coefs array and new_coef to be added
- coefs = coefs * (1 - new_coef)
- coefs.append(new_coef)
Returns
-------
blend_clf : BlendClassifier
Updated blend classifier
"""
_estimators = self.estimators + new_estimators
_coefs = self.coefs
if self._check_coefs_sum(verbose=True):
# Append each coefficient saving whole sum equal to 1
for coef in new_coefs:
_coefs = tuple(map(lambda x: x * (1 - coef), _coefs))
_coefs = _coefs + (coef,)
else:
_coefs += new_coefs
blend_clf = BlendClassifier(_estimators, _coefs)
return blend_clf
def _check_coefs_sum(self, verbose=True):
if len(self.coefs) > 0 and sum(self.coefs) != 1:
if verbose:
print('WARNING: the sum of coefficients is not equal to 1.')
return False
return True
def _check_array_elems_type(self, arr, types):
for elem in arr:
if not isinstance(elem, types):
return False
return True
def _get_models_and_coefs(self, *args):
if len(args) % 2:
raise ValueError('Number of models and number of coefficients must be equal.')
if len(args) == 2 and isinstance(args[0], (list, tuple)):
models, coefs = args[0], args[1]
else:
models, coefs = args[:len(args) / 2], args[len(args) / 2:]
if not self._check_array_elems_type(models, BaseEstimator):
raise ValueError('All models must be estimator instances.')
if not self._check_array_elems_type(coefs, (float, int)):
raise ValueError('All coefficients must be numbers (float or int).')
return models, coefs
def _check_params(self, estimators, coefs):
if len(estimators) != len(coefs):
raise ValueError('Number of estimators and number of coefficients must be the same.\n'
'Given estimators parameter has len {} and coefs parameter has len {}'
.format(len(estimators), len(coefs)))
if not isinstance(estimators, tuple):
raise ValueError('The estimators parameter must be a tuple type.\n'
'Given estimators parameter have {} type'.format(type(estimators)))
if not isinstance(coefs, tuple):
raise ValueError('The coefs must be a tuple type.\n'
'Given coefs parameter have {} type'.format(type(coefs))) | mit |
jjx02230808/project0223 | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
skearnes/color-features | paper/code/analysis.py | 1 | 12160 | """Analyze results.
Use the saved model output to calculate AUC and other metrics.
"""
import collections
import cPickle as pickle
import gflags as flags
import gzip
import logging
import numpy as np
import os
import pandas as pd
from sklearn import metrics
from statsmodels.stats import proportion
import sys
flags.DEFINE_string('root', None, 'Root directory containing model results.')
flags.DEFINE_string('dataset_file', None, 'Filename containing datasets.')
flags.DEFINE_string('prefix', None, 'Dataset prefix.')
flags.DEFINE_boolean('tversky', False, 'If True, use Tversky features.')
flags.DEFINE_integer('num_folds', 5, 'Number of cross-validation folds.')
flags.DEFINE_boolean('cycle', False,
'If True, expect multiple query molecules.')
flags.DEFINE_string('reload', None, 'Load previously analyzed results.')
flags.DEFINE_string('subset', None, 'Subset.')
FLAGS = flags.FLAGS
logging.getLogger().setLevel(logging.INFO)
FEATURES_MAP = {
'rocs': 'TanimotoCombo',
'shape_color': 'ST-CT',
'shape_color_components': 'ST-CCT',
'shape_color_overlaps': 'ST-CAO',
'shape_color_components_overlaps': 'ST-CCT-CAO',
'rocs_tversky': 'TverskyCombo',
'shape_color_tversky': 'STv-CTv',
'shape_color_components_tversky': 'STv-CCTv',
'shape_color_components_tversky_overlaps': 'STv-CCTv-CAO',
}
MODEL_MAP = {
'logistic': 'LR',
'random_forest': 'RF',
'svm': 'SVM',
}
def roc_enrichment(fpr, tpr, target_fpr):
"""Get ROC enrichment."""
assert fpr[0] == 0
assert fpr[-1] == 1
assert np.all(np.diff(fpr) >= 0)
return np.true_divide(np.interp(target_fpr, fpr, tpr), target_fpr)
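# Worked example (added comment): with fpr = [0, 0.01, 1] and tpr = [0, 0.3, 1],
# roc_enrichment(fpr, tpr, 0.005) interpolates the TPR to 0.15 and returns
# 0.15 / 0.005 = 30, i.e. 30x more actives than random selection at that FPR.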
def get_cv_metrics(y_true, y_pred):
"""Get 5-fold mean AUC."""
assert len(y_true) == len(y_pred)
fold_metrics = collections.defaultdict(list)
for yt, yp in zip(y_true, y_pred):
assert len(yt) == len(yp)
fold_metrics['auc'].append(metrics.roc_auc_score(yt, yp))
fpr, tpr, _ = metrics.roc_curve(yt, yp)
for x in [0.005, 0.01, 0.02, 0.05, 0.1, 0.2]:
fold_metrics['e-%g' % x].append(roc_enrichment(fpr, tpr, x))
return fold_metrics
def add_rows(features, scores, rows, dataset, index=None):
"""Record per-fold and averaged cross-validation results."""
for fold in range(len(scores['auc'])):
row = {'dataset': dataset, 'features': features, 'fold': fold}
if index is not None:
row['index'] = index
for key, values in scores.iteritems():
row[key] = values[fold]
rows.append(row)
# Averages
row = {'dataset': dataset, 'features': features, 'fold': 'all'}
if index is not None:
row['index'] = index
for key, values in scores.iteritems():
row[key] = np.mean(values)
rows.append(row)
def load_output_and_calculate_metrics(model, subset):
"""Calculate metrics using saved model output.
Args:
model: String model type (e.g. logistic).
subset: String query subset (e.g. omega1).
Returns:
DataFrame containing calculated metrics for each model/subset, including
per-fold and average values for each reference molecule.
"""
with open(FLAGS.dataset_file) as f:
datasets = [line.strip() for line in f]
rows = []
for dataset in datasets:
ref_idx = 0
while True: # Cycle through reference molecules.
ref_idx_exists = get_ref_rows(model, subset, dataset, ref_idx, rows)
if not FLAGS.cycle or not ref_idx_exists:
break
ref_idx += 1
logging.info('%s\t%d', dataset, ref_idx)
return pd.DataFrame(rows)
def get_ref_rows(model, subset, dataset, ref_idx, rows):
logging.debug('ref_idx %d', ref_idx)
for features in FEATURES_MAP.keys():
logging.debug('Features: %s', features)
fold_y_true = []
fold_y_pred = []
for fold_idx in range(FLAGS.num_folds):
filename = get_output_filename(dataset, model, subset, features,
fold_idx, ref_idx)
if not os.path.exists(filename):
return False
logging.debug(filename)
with gzip.open(filename) as f:
df = pickle.load(f)
fold_y_true.append(df['y_true'].values)
fold_y_pred.append(df['y_pred'].values)
scores = get_cv_metrics(fold_y_true, fold_y_pred)
add_rows(features, scores, rows, dataset, index=ref_idx)
return True
def get_output_filename(dataset, model, subset, features, fold_idx, ref_idx):
if FLAGS.cycle:
filename = os.path.join(
'%s-%s' % (FLAGS.root, subset),
dataset,
'fold-%d' % fold_idx,
'%s-%s-%s-%s-%s-fold-%d-ref-%d-output.pkl.gz' % (
FLAGS.prefix, dataset, model, subset, features,
fold_idx, ref_idx))
else:
assert ref_idx == 0
filename = os.path.join(
'%s-%s' % (FLAGS.root, subset),
dataset,
'fold-%d' % fold_idx,
'%s-%s-%s-%s-%s-fold-%d-output.pkl.gz' % (
FLAGS.prefix, dataset, model, subset, features,
fold_idx))
return filename
def load_data(model, subset):
data = []
with open(FLAGS.dataset_file) as f:
for line in f:
dataset = line.strip()
filename = os.path.join(FLAGS.root, '%s-%s-%s-%s.pkl.gz' % (
FLAGS.prefix, dataset, model, subset))
assert os.path.exists(filename)
logging.info(filename)
with gzip.open(filename) as g:
df = pickle.load(g)
data.append(df)
return pd.concat(data)
def confidence_interval(delta, metric):
"""Calculate a two-sided 95% confidence interval for differences."""
# Wilson score interval for sign test.
num_successes = np.count_nonzero(delta > 0)
num_trials = np.count_nonzero(delta != 0) # Exclude zero differences.
lower, upper = proportion.proportion_confint(
num_successes, num_trials, alpha=0.05, method='wilson')
median_delta = delta.median()
if metric == 'auc':
median = r'%.3f' % median_delta
ci = r'(%.2f, %.2f)' % (lower, upper)
else:
median = r'%.0f' % median_delta
ci = r'(%.2f, %.2f)' % (lower, upper)
if lower < 0.5 and upper < 0.5:
median = r'\bfseries \color{red} ' + median
ci = r'\bfseries \color{red} ' + ci
elif lower > 0.5 and upper > 0.5:
median = r'\bfseries ' + median
ci = r'\bfseries ' + ci
return median, ci
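# Hedged sketch (added comment): the interval above is a Wilson score interval
# on the fraction of non-tied datasets where the candidate features beat the
# ROCS baseline. For example, if delta > 0 on 40 of 60 non-tied datasets:
#
#   from statsmodels.stats import proportion
#   lower, upper = proportion.proportion_confint(40, 60, alpha=0.05,
#                                                method='wilson')
#   # lower ~= 0.54, upper ~= 0.77; both above 0.5, so the row would be bolded.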
def data_table(data, subsets, models, kind=None, tversky=False):
"""Get medians and compare everything to ROCS.
Args:
data: DataFrame containing model performance.
subsets: List of query subsets.
models: List of models to include in the table.
kind: List of metrics to report. Defaults to ['auc'].
tversky: Boolean whether to use Tversky features. If False, use Tanimoto
features.
"""
if kind is None:
kind = ['auc']
if tversky:
rocs_baseline = 'rocs_tversky'
features_order = ['shape_color_tversky',
'shape_color_components_tversky',
'shape_color_overlaps',
'shape_color_components_tversky_overlaps']
else:
rocs_baseline = 'rocs'
features_order = ['shape_color', 'shape_color_components',
'shape_color_overlaps',
'shape_color_components_overlaps']
table = []
# Get ROCS row.
row = [r'\cellcolor{white} ROCS', FEATURES_MAP[rocs_baseline]]
for subset in subsets:
rocs_mask = ((data['features'] == rocs_baseline) &
(data['subset'] == subset) &
(data['model'] == models[0]))
rocs_df = data[rocs_mask]
logging.info('Confidence interval N = %d', len(rocs_df))
logging.info('Number of datasets = %d',
len(pd.unique(rocs_df['dataset'])))
for metric in kind:
if metric == 'auc':
number = '%.3f'
else:
number = '%.0f'
row.extend([number % rocs_df[metric].median(), '', ''])
table.append(' & '.join(row))
# Get model rows.
for model in models:
for features in features_order:
if features == features_order[-1]:
row = [r'\multirow{-%d}{*}{\cellcolor{white} %s}' % (
len(features_order), MODEL_MAP[model])]
else:
row = [r'\cellcolor{white}']
row.append(FEATURES_MAP[features])
for subset in subsets:
mask = ((data['features'] == features) &
(data['subset'] == subset) &
(data['model'] == model))
df = data[mask]
rocs_mask = ((data['features'] == rocs_baseline) &
(data['subset'] == subset) &
(data['model'] == model))
rocs_df = data[rocs_mask]
for metric in kind:
if metric == 'auc':
number = '%.3f'
else:
number = '%.0f'
row.append(number % df[metric].median())
if features == rocs_baseline:
row.append('')
row.append('')
else:
assert np.array_equal(df['dataset'].values,
rocs_df['dataset'].values)
if 'index' in df.columns:
assert np.array_equal(df['index'].values,
rocs_df['index'].values)
delta = df.copy()
delta[metric] -= rocs_df[metric].values
row.extend(confidence_interval(delta[metric], metric))
table.append(' & '.join(row))
print ' \\\\\n'.join(table)
def main():
if FLAGS.prefix == 'muv':
subsets = ['omega1']
assert FLAGS.cycle
elif FLAGS.prefix == 'dude':
subsets = ['xtal', 'omega1']
elif FLAGS.prefix == 'chembl':
subsets = ['omega1']
assert FLAGS.cycle
else:
raise ValueError(FLAGS.prefix)
if FLAGS.subset is not None:
subsets = [FLAGS.subset]
# Load data from output or previously processed.
models = ['logistic', 'random_forest', 'svm']
if FLAGS.reload is not None:
logging.info('Loading processed data from %s', FLAGS.reload)
data = pd.read_pickle(FLAGS.reload)
else:
data = []
for model in models:
for subset in subsets:
logging.info('%s\t%s', model, subset)
df = load_output_and_calculate_metrics(model, subset)
df['model'] = model
df['subset'] = subset
data.append(df)
data = pd.concat(data)
# Save processed data.
filename = '%s-processed.pkl.gz' % FLAGS.prefix
logging.info('Saving processed data to %s', filename)
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
# Only keep 5-fold mean information.
mask = data['fold'] == 'all'
data = data[mask]
# AUC tables.
# Combine subsets into a single table here.
logging.info('AUC table')
data_table(data, subsets, models, kind=['auc'], tversky=FLAGS.tversky)
# Enrichment tables.
# One per FPR.
for metric in ['e-0.005', 'e-0.01', 'e-0.02', 'e-0.05']:
logging.info('Metric: %s', metric)
logging.info('Enrichment table')
data_table(data, subsets, models, kind=[metric], tversky=FLAGS.tversky)
if __name__ == '__main__':
flags.MarkFlagAsRequired('root')
flags.MarkFlagAsRequired('dataset_file')
flags.MarkFlagAsRequired('prefix')
FLAGS(sys.argv)
main()
| bsd-3-clause |
manuelli/director | src/python/director/planplayback.py | 1 | 7857 | import os
import vtkAll as vtk
import math
import time
import re
import numpy as np
from director.timercallback import TimerCallback
from director import objectmodel as om
from director.simpletimer import SimpleTimer
from director.utime import getUtime
from director import robotstate
import copy
import pickle
import scipy.interpolate
def asRobotPlan(msg):
'''
If the given message is a robot_plan_with_supports_t then this function returns
the plan message contained within it. For any other message type, this function
just returns its input argument.
'''
try:
import drc as lcmdrc
except ImportError:
pass
else:
if isinstance(msg, lcmdrc.robot_plan_with_supports_t):
return msg.plan
return msg
class PlanPlayback(object):
def __init__(self):
self.animationCallback = None
self.animationTimer = None
self.interpolationMethod = 'slinear'
self.playbackSpeed = 1.0
self.jointNameRegex = ''
@staticmethod
def getPlanPoses(msgOrList):
if isinstance(msgOrList, list):
messages = msgOrList
allPoseTimes, allPoses = PlanPlayback.getPlanPoses(messages[0])
for msg in messages[1:]:
poseTimes, poses = PlanPlayback.getPlanPoses(msg)
poseTimes += allPoseTimes[-1]
allPoseTimes = np.hstack((allPoseTimes, poseTimes[1:]))
allPoses += poses[1:]
return allPoseTimes, allPoses
else:
msg = asRobotPlan(msgOrList)
poses = []
poseTimes = []
for plan in msg.plan:
pose = robotstate.convertStateMessageToDrakePose(plan)
poseTimes.append(plan.utime / 1e6)
poses.append(pose)
return np.array(poseTimes), poses
@staticmethod
def getPlanElapsedTime(msg):
msg = asRobotPlan(msg)
startTime = msg.plan[0].utime
endTime = msg.plan[-1].utime
return (endTime - startTime) / 1e6
@staticmethod
def mergePlanMessages(plans):
msg = copy.deepcopy(plans[0])
for plan in plans[1:]:
plan = copy.deepcopy(plan)
lastTime = msg.plan[-1].utime
for state in plan.plan:
state.utime += lastTime
msg.plan_info += plan.plan_info
msg.plan += plan.plan
msg.num_states = len(msg.plan)
return msg
@staticmethod
def isPlanInfoFeasible(info):
return 0 <= info < 10
@staticmethod
def isPlanFeasible(plan):
plan = asRobotPlan(plan)
return plan is not None and (max(plan.plan_info) < 10 and min(plan.plan_info) >= 0)
def stopAnimation(self):
if self.animationTimer:
self.animationTimer.stop()
def setInterpolationMethod(self, method):
self.interpolationMethod = method
def playPlan(self, msg, jointController):
self.playPlans([msg], jointController)
def playPlans(self, messages, jointController):
assert len(messages)
poseTimes, poses = self.getPlanPoses(messages)
self.playPoses(poseTimes, poses, jointController)
def getPoseInterpolatorFromPlan(self, message):
poseTimes, poses = self.getPlanPoses(message)
return self.getPoseInterpolator(poseTimes, poses)
def getPoseInterpolator(self, poseTimes, poses, unwrap_rpy=True):
if unwrap_rpy:
poses = np.array(poses, copy=True)
poses[:,3:6] = np.unwrap(poses[:,3:6],axis=0)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.PchipInterpolator(poseTimes, poses, axis=0)
return f
def getPlanPoseMeshes(self, messages, jointController, robotModel, numberOfSamples):
poseTimes, poses = self.getPlanPoses(messages)
f = self.getPoseInterpolator(poseTimes, poses)
sampleTimes = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
meshes = []
for sampleTime in sampleTimes:
pose = f(sampleTime)
jointController.setPose('plan_playback', pose)
polyData = vtk.vtkPolyData()
robotModel.model.getModelMesh(polyData)
meshes.append(polyData)
return meshes
def showPoseAtTime(self, time, jointController, poseInterpolator):
pose = poseInterpolator(time)
jointController.setPose('plan_playback', pose)
def playPoses(self, poseTimes, poses, jointController):
f = self.getPoseInterpolator(poseTimes, poses)
timer = SimpleTimer()
def updateAnimation():
tNow = timer.elapsed() * self.playbackSpeed
if tNow > poseTimes[-1]:
pose = poses[-1]
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
return False
pose = f(tNow)
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
self.animationTimer = TimerCallback()
self.animationTimer.targetFps = 60
self.animationTimer.callback = updateAnimation
self.animationTimer.start()
updateAnimation()
def picklePlan(self, filename, msg):
poseTimes, poses = self.getPlanPoses(msg)
pickle.dump((poseTimes, poses), open(filename, 'w'))
def getMovingJointNames(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
return jointNames
def plotPlan(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
self.plotPoses(poseTimes, poses)
def plotPoses(self, poseTimes, poses):
import matplotlib.pyplot as plt
poses = np.array(poses)
if self.jointNameRegex:
jointIds = range(poses.shape[1])
else:
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
jointTrajectories = [poses[:,jointId] for jointId in jointIds]
seriesNames = []
sampleResolutionInSeconds = 0.01
numberOfSamples = (poseTimes[-1] - poseTimes[0]) / sampleResolutionInSeconds
xnew = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
fig = plt.figure()
ax = fig.add_subplot(111)
for jointId, jointName, jointTrajectory in zip(jointIds, jointNames, jointTrajectories):
if self.jointNameRegex and not re.match(self.jointNameRegex, jointName):
continue
x = poseTimes
y = jointTrajectory
y = np.rad2deg(y)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(x, y, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.PchipInterpolator(x, y)
ax.plot(x, y, 'ko')
seriesNames.append(jointName + ' points')
ax.plot(xnew, f(xnew), '-')
seriesNames.append(jointName + ' ' + self.interpolationMethod)
ax.legend(seriesNames, loc='upper right').draggable()
ax.set_xlabel('time (s)')
ax.set_ylabel('joint angle (deg)')
ax.set_title('joint trajectories')
plt.show()
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/tests/test_msgpack/test_except.py | 15 | 1043 | #!/usr/bin/env python
# coding: utf-8
import unittest
import nose
import datetime
from pandas.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(unittest.TestCase):
def test_raise_on_find_unsupported_value(self):
import datetime
self.assertRaises(TypeError, packb, datetime.datetime.now())
def test_raise_from_object_hook(self):
def hook(obj):
raise DummyException
self.assertRaises(DummyException, unpackb, packb({}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': 'buzz'}), object_pairs_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
self.assertRaises(DummyException, unpackb, packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
def test_invalidvalue(self):
self.assertRaises(ValueError, unpackb, b'\xd9\x97#DL_')
| mit |
druce/safewithdrawal_tensorflow | lifetable.py | 1 | 8359 | import numpy as np
import pandas as pd
from pandas import DataFrame
############################################################
# Life tables
# https://www.ssa.gov/oact/STATS/table4c6.html
############################################################
############################################################
# Male life table
############################################################
# survivors from 100000 births
MlivesArray = [100000, 99348, 99302, 99273, 99252, 99235, 99219, 99205, 99192, 99180,
99170, 99161, 99151, 99138, 99119, 99091, 99052, 99003, 98943, 98870,
98785, 98685, 98572, 98449, 98321, 98191, 98060, 97928, 97795, 97659,
97519, 97376, 97230, 97080, 96927, 96772, 96612, 96448, 96277, 96097,
95908, 95708, 95493, 95262, 95012, 94739, 94441, 94115, 93759, 93368,
92940, 92472, 91961, 91406, 90804, 90153, 89450, 88693, 87883, 87022,
86112, 85147, 84125, 83042, 81899, 80691, 79412, 78054, 76613, 75084,
73461, 71732, 69889, 67930, 65853, 63657, 61329, 58859, 56249, 53504,
50629, 47621, 44484, 41233, 37890, 34482, 31040, 27598, 24201, 20896,
17735, 14768, 12043, 9599, 7463, 5647, 4157, 2977, 2075, 1410,
935, 605, 380, 232, 137, 78, 43, 23, 11, 5,
2, 1, 0, 0, 0, 0, 0, 0, 0, 0, ]
MlivesSeries = pd.Series(MlivesArray)
# life expectancy
MLEarray = [76.28, 75.78, 74.82, 73.84, 72.85, 71.87, 70.88, 69.89, 68.9, 67.9,
66.91, 65.92, 64.92, 63.93, 62.94, 61.96, 60.99, 60.02, 59.05, 58.09,
57.14, 56.2, 55.27, 54.33, 53.4, 52.47, 51.54, 50.61, 49.68, 48.75,
47.82, 46.89, 45.96, 45.03, 44.1, 43.17, 42.24, 41.31, 40.38, 39.46,
38.53, 37.61, 36.7, 35.78, 34.88, 33.98, 33.08, 32.19, 31.32, 30.44,
29.58, 28.73, 27.89, 27.05, 26.23, 25.41, 24.61, 23.82, 23.03, 22.25,
21.48, 20.72, 19.97, 19.22, 18.48, 17.75, 17.03, 16.32, 15.61, 14.92,
14.24, 13.57, 12.92, 12.27, 11.65, 11.03, 10.43, 9.85, 9.28, 8.73,
8.2, 7.68, 7.19, 6.72, 6.27, 5.84, 5.43, 5.04, 4.68, 4.34,
4.03, 3.74, 3.47, 3.23, 3.01, 2.82, 2.64, 2.49, 2.36, 2.24,
2.12, 2.01, 1.9, 1.8, 1.7, 1.6, 1.51, 1.42, 1.34, 1.26,
1.18, 1.11, 1.04, 0.97, 0.9, 0.84, 0.78, 0.72, 0.67, 0.61,
]
MLEseries = pd.Series(MLEarray)
# death probability
MdeathrateArray = [0.006519, 0.000462, 0.000291, 0.000209, 0.000176, 0.000159, 0.000146, 0.000133, 0.000118, 0.000102,
0.000091, 0.000096, 0.000128, 0.000195, 0.000288, 0.000389, 0.000492, 0.000607, 0.000735, 0.000869,
0.001011, 0.001145, 0.001246, 0.001301, 0.001321, 0.00133, 0.001345, 0.001363, 0.001391, 0.001427,
0.001467, 0.001505, 0.001541, 0.001573, 0.001606, 0.001648, 0.001704, 0.001774, 0.001861, 0.001967,
0.002092, 0.00224, 0.002418, 0.002629, 0.002873, 0.003146, 0.003447, 0.003787, 0.004167, 0.004586,
0.005038, 0.00552, 0.006036, 0.006587, 0.00717, 0.007801, 0.008466, 0.009133, 0.009792, 0.010462,
0.011197, 0.012009, 0.012867, 0.013772, 0.014749, 0.015852, 0.017097, 0.018463, 0.019959, 0.021616,
0.023528, 0.025693, 0.028041, 0.030567, 0.033347, 0.036572, 0.040276, 0.044348, 0.048797, 0.053739,
0.059403, 0.065873, 0.073082, 0.08107, 0.089947, 0.099842, 0.110863, 0.123088, 0.136563, 0.151299,
0.167291, 0.18452, 0.202954, 0.222555, 0.243272, 0.263821, 0.283833, 0.302916, 0.320672, 0.336706,
0.353541, 0.371218, 0.389779, 0.409268, 0.429732, 0.451218, 0.473779, 0.497468, 0.522341, 0.548458,
0.575881, 0.604675, 0.634909, 0.666655, 0.699987, 0.734987, 0.771736, 0.810323, 0.850839, 0.893381,
]
MdeathrateSeries = pd.Series(MdeathrateArray)
MlivesSeries.to_csv('MLivesSeries.csv', index_label='index')
MLEseries.to_csv('MLEseries.csv', index_label='index')
MdeathrateSeries.to_csv('MdeathrateSeries.csv', index_label='index')
############################################################
# Female life table
############################################################
FlivesArray = [100000, 99462, 99425, 99403, 99387, 99373, 99361, 99351, 99341, 99331,
99322, 99312, 99303, 99291, 99278, 99262, 99243, 99220, 99194, 99165,
99132, 99095, 99054, 99010, 98963, 98915, 98864, 98811, 98755, 98697,
98635, 98569, 98500, 98426, 98348, 98265, 98176, 98081, 97979, 97870,
97753, 97627, 97491, 97343, 97182, 97004, 96810, 96597, 96364, 96109,
95829, 95524, 95193, 94834, 94449, 94038, 93598, 93126, 92623, 92090,
91526, 90927, 90287, 89600, 88858, 88054, 87177, 86223, 85187, 84069,
82864, 81561, 80147, 78616, 76961, 75177, 73244, 71148, 68888, 66467,
63880, 61114, 58159, 55016, 51694, 48205, 44565, 40796, 36933, 33017,
29104, 25257, 21542, 18027, 14775, 11839, 9267, 7083, 5285, 3852,
2745, 1909, 1292, 850, 541, 333, 197, 112, 61, 31,
15, 7, 3, 1, 0, 0, 0, 0, 0, 0,]
FlivesSeries = pd.Series(FlivesArray)
FLEarray = [81.05, 80.49, 79.52, 78.54, 77.55, 76.56, 75.57, 74.58, 73.58, 72.59,
71.6, 70.6, 69.61, 68.62, 67.63, 66.64, 65.65, 64.67, 63.68, 62.7,
61.72, 60.75, 59.77, 58.8, 57.82, 56.85, 55.88, 54.91, 53.94, 52.97,
52.01, 51.04, 50.08, 49.11, 48.15, 47.19, 46.23, 45.28, 44.33, 43.37,
42.43, 41.48, 40.54, 39.6, 38.66, 37.73, 36.81, 35.89, 34.97, 34.06,
33.16, 32.27, 31.38, 30.49, 29.62, 28.74, 27.88, 27.01, 26.16, 25.31,
24.46, 23.62, 22.78, 21.95, 21.13, 20.32, 19.52, 18.73, 17.95, 17.18,
16.43, 15.68, 14.95, 14.23, 13.53, 12.83, 12.16, 11.5, 10.86, 10.24,
9.64, 9.05, 8.48, 7.94, 7.42, 6.92, 6.44, 5.99, 5.57, 5.17,
4.8, 4.45, 4.13, 3.84, 3.57, 3.34, 3.12, 2.93, 2.76, 2.6,
2.45, 2.3, 2.17, 2.03, 1.91, 1.78, 1.67, 1.56, 1.45, 1.35,
1.26, 1.17, 1.08, 1, 0.92, 0.85, 0.78, 0.72, 0.67, 0.61,]
FLEseries = pd.Series(FLEarray)
FdeathrateArray =[0.005377, 0.000379, 0.000221, 0.000162, 0.000133, 0.000119, 0.000109, 0.000101, 0.000096, 0.000093,
0.000094, 0.0001, 0.000112, 0.000134, 0.000162, 0.000194, 0.000226, 0.000261, 0.000297, 0.000334,
0.000373, 0.000412, 0.000446, 0.000472, 0.000493, 0.000513, 0.000537, 0.000563, 0.000593, 0.000627,
0.000664, 0.000705, 0.000748, 0.000794, 0.000845, 0.000903, 0.000968, 0.001038, 0.001113, 0.001196,
0.001287, 0.001393, 0.001517, 0.001662, 0.001827, 0.002005, 0.002198, 0.002412, 0.002648, 0.002904,
0.003182, 0.003473, 0.003767, 0.004058, 0.004352, 0.004681, 0.00504, 0.0054, 0.005756, 0.006128,
0.006545, 0.007034, 0.007607, 0.008281, 0.009057, 0.009953, 0.01095, 0.01201, 0.013124, 0.01433,
0.015728, 0.017338, 0.019108, 0.021041, 0.023191, 0.025713, 0.028609, 0.03176, 0.035157, 0.03892,
0.043289, 0.048356, 0.054041, 0.060384, 0.067498, 0.075516, 0.084556, 0.094703, 0.106014, 0.118513,
0.132206, 0.147092, 0.163154, 0.180371, 0.198714, 0.217264, 0.235735, 0.25381, 0.271155, 0.287424,
0.30467, 0.32295, 0.342327, 0.362867, 0.384639, 0.407717, 0.43218, 0.458111, 0.485597, 0.514733,
0.545617, 0.578354, 0.613055, 0.649839, 0.688829, 0.730159, 0.771736, 0.810323, 0.850839, 0.893381,]
FdeathrateSeries = pd.Series(FdeathrateArray)
def genLifetable(livesSeries, leSeries, ret_age, ret_length):
"""Create frame with life expectancies, lives"""
# check bounds of lifetable
# assert start age, end age between the two
# 2nd version - take DataFrame where everything lines up by age
end_age = ret_age + ret_length
survival = livesSeries[ret_age:end_age]
survival = survival / float(survival[ret_age])
deathrate = survival - survival.shift(-1)
deathrate.ix[end_age-1] = 1 - np.sum(deathrate)
lifetable = DataFrame(survival, columns=['survival'])
LE = leSeries[ret_age:end_age]
lifetable['life_expectancy'] = LE
lifetable['deathrate'] = deathrate
return lifetable
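# Hedged usage sketch (added comment, not in the original module): building a
# male retirement lifetable starting at age 65 for a 30-year horizon from the
# series defined above would look roughly like:
#
#   lifetable = genLifetable(MlivesSeries, MLEseries, 65, 30)
#   print lifetable.head()   # columns: survival, life_expectancy, deathrate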
| mit |
ianctse/pvlib-python | pvlib/test/test_clearsky.py | 1 | 6604 | import logging
pvl_logger = logging.getLogger('pvlib')
import numpy as np
import pandas as pd
from nose.tools import raises
from numpy.testing import assert_almost_equal
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pvlib.location import Location
from pvlib import clearsky
from pvlib import solarposition
from . import requires_scipy
# setup times and location to be tested.
tus = Location(32.2, -111, 'US/Arizona', 700)
times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h')
times_localized = times.tz_localize(tus.tz)
ephem_data = solarposition.get_solarposition(times_localized, tus.latitude,
tus.longitude)
@requires_scipy
def test_ineichen_required():
# the clearsky function should call lookup_linke_turbidity by default
expected = pd.DataFrame(
np.array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 51.47811191, 265.33462162, 84.48262202],
[ 105.008507 , 832.29100407, 682.67761951],
[ 121.97988054, 901.31821834, 1008.02102657],
[ 112.57957512, 867.76297247, 824.61702926],
[ 76.69672675, 588.8462898 , 254.5808329 ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times_localized)
out = clearsky.ineichen(times_localized, tus.latitude, tus.longitude)
assert_frame_equal(expected, out)
def test_ineichen_supply_linke():
expected = pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 40.16490879, 321.71856556, 80.12815294],
[ 95.14336873, 876.49252839, 703.47605855],
[ 118.4587024 , 939.81646535, 1042.34480815],
[ 105.36645492, 909.11265773, 851.32459694],
[ 61.91187639, 647.35889938, 257.42691896],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times_localized)
out = clearsky.ineichen(times_localized, tus.latitude, tus.longitude,
altitude=tus.altitude,
linke_turbidity=3)
assert_frame_equal(expected, out)
def test_ineichen_solpos():
clearsky.ineichen(times_localized, tus.latitude, tus.longitude,
linke_turbidity=3,
solarposition_method='ephemeris')
def test_ineichen_airmass():
expected = pd.DataFrame(
np.array([[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 53.90422388, 257.01655613, 85.87406435],
[ 101.34055688, 842.92925705, 686.39337307],
[ 117.7573735 , 909.70367947, 1012.04184961],
[ 108.6233401 , 877.30589626, 828.49118038],
[ 75.23108133, 602.06895546, 257.10961202],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
columns=['dhi', 'dni', 'ghi'],
index=times_localized)
out = clearsky.ineichen(times_localized, tus.latitude, tus.longitude,
linke_turbidity=3,
airmass_model='simple')
assert_frame_equal(expected, out)
@requires_scipy
def test_lookup_linke_turbidity():
times = pd.date_range(start='2014-06-24', end='2014-06-25',
freq='12h', tz=tus.tz)
# expect same value on 2014-06-24 0000 and 1200, and
# diff value on 2014-06-25
expected = pd.Series(np.array([3.10126582, 3.10126582, 3.11443038]),
index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude)
assert_series_equal(expected, out)
@requires_scipy
def test_lookup_linke_turbidity_nointerp():
times = pd.date_range(start='2014-06-24', end='2014-06-25',
freq='12h', tz=tus.tz)
# expect same value for all days
expected = pd.Series(np.array([3., 3., 3.]), index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude,
interp_turbidity=False)
assert_series_equal(expected, out)
@requires_scipy
def test_lookup_linke_turbidity_months():
times = pd.date_range(start='2014-04-01', end='2014-07-01',
freq='1M', tz=tus.tz)
expected = pd.Series(np.array([2.8943038, 2.97316456, 3.18025316]),
index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude,
tus.longitude)
assert_series_equal(expected, out)
@requires_scipy
def test_lookup_linke_turbidity_nointerp_months():
times = pd.date_range(start='2014-04-10', end='2014-07-10',
freq='1M', tz=tus.tz)
expected = pd.Series(np.array([2.85, 2.95, 3.]), index=times)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude,
interp_turbidity=False)
assert_series_equal(expected, out)
# changing the dates shouldn't matter if interp=False
times = pd.date_range(start='2014-04-05', end='2014-07-05',
freq='1M', tz=tus.tz)
out = clearsky.lookup_linke_turbidity(times, tus.latitude, tus.longitude,
interp_turbidity=False)
assert_series_equal(expected, out)
def test_haurwitz():
expected = pd.DataFrame(np.array([[0.],
[0.],
[82.85934048],
[699.74514735],
[1016.50198354],
[838.32103769],
[271.90853863],
[0.],
[0.]]),
columns=['ghi'], index=times_localized)
out = clearsky.haurwitz(ephem_data['zenith'])
assert_frame_equal(expected, out)
| bsd-3-clause |
akloster/bokeh | bokeh/charts/_data_adapter.py | 43 | 8802 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the ChartObject class, a minimal prototype class to build more chart
types on top of it. It provides the mechanisms to support the shared chained
methods.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
from collections import OrderedDict
from ..properties import bokeh_integer_types, Datetime
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import blaze
except ImportError:
blaze=None
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
DEFAULT_INDEX_ALIASES = list('abcdefghijklmnopqrstuvz1234567890')
DEFAULT_INDEX_ALIASES += list(zip(DEFAULT_INDEX_ALIASES, DEFAULT_INDEX_ALIASES))
class DataAdapter(object):
"""
Adapter object used to normalize Charts inputs to a common interface.
Supported inputs are dict, list, tuple, np.ndarray and pd.DataFrame.
"""
def __init__(self, data, index=None, columns=None, force_alias=True):
self.__values = data
self._values = self.validate_values(data)
self.convert_index_to_int = False
self._columns_map = {}
self.convert_items_to_dict = False
if columns is None and force_alias:
# no column 'labels' defined for data... in this case we use
# default names
keys = getattr(self._values, 'keys', None)
if callable(keys):
columns = list(keys())
elif keys is None:
columns = list(map(str, range(len(data))))
else:
columns = list(keys)
if columns:
self._columns = columns
# define a mapping between the real keys to access data and the aliases
# we have defined using 'columns'
self._columns_map = dict(zip(columns, self.keys()))
if index is not None:
self._index = index
self.convert_items_to_dict = True
elif force_alias:
_index = getattr(self._values, 'index', None)
# check because if it is a callable self._values is not a
# dataframe (probably a list)
if _index is None:
indexes = self.index
if isinstance(indexes[0], int):
self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
self.convert_items_to_dict = True
elif not callable(_index):
self._index = list(_index)
self.convert_items_to_dict = True
else:
self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
self.convert_items_to_dict = True
@staticmethod
def is_number(value):
numbers = (float, ) + bokeh_integer_types
return isinstance(value, numbers)
@staticmethod
def is_datetime(value):
try:
dt = Datetime(value)
dt # shut up pyflakes
return True
except ValueError:
return False
@staticmethod
def validate_values(values):
if np and isinstance(values, np.ndarray):
if len(values.shape) == 1:
return np.array([values])
else:
return values
elif pd and isinstance(values, pd.DataFrame):
return values
elif isinstance(values, (dict, OrderedDict)):
if all(DataAdapter.is_number(x) for x in values.values()):
return values
return values
elif isinstance(values, (list, tuple)):
if all(DataAdapter.is_number(x) for x in values):
return [values]
return values
elif hasattr(values, '__array__'):
values = pd.DataFrame(np.asarray(values))
return values
# TODO: Improve this error message..
raise TypeError("Input type not supported! %s" % values)
def index_converter(self, x):
key = self._columns_map.get(x, x)
if self.convert_index_to_int:
key = int(key)
return key
def keys(self):
# assuming it's a dict or dataframe
keys = getattr(self._values, "keys", None)
if callable(keys):
return list(keys())
elif keys is None:
self.convert_index_to_int = True
indexes = range(len(self._values))
return list(map(str, indexes))
else:
return list(keys)
def __len__(self):
return len(self.values())
def __iter__(self):
for k in self.keys():
yield k
def __getitem__(self, key):
val = self._values[self.index_converter(key)]
# if we have "index aliases" we need to remap the values...
if self.convert_items_to_dict:
val = dict(zip(self._index, val))
return val
def values(self):
return self.normalize_values(self._values)
@staticmethod
def normalize_values(values):
_values = getattr(values, "values", None)
if callable(_values):
return list(_values())
elif _values is None:
return values
else:
# assuming it's a dataframe, in that case it returns transposed
# values compared to it's dict equivalent..
return list(_values.T)
def items(self):
return [(key, self[key]) for key in self]
def iterkeys(self):
return iter(self)
def itervalues(self):
for k in self:
yield self[k]
def iteritems(self):
for k in self:
yield (k, self[k])
@property
def columns(self):
try:
return self._columns
except AttributeError:
return list(self.keys())
@property
def index(self):
try:
return self._index
except AttributeError:
index = getattr(self._values, "index", None)
if not callable(index) and index is not None:
# guess it's a pandas dataframe..
return index
# no, it's not. So it's probably a list so let's get the
# values and check
values = self.values()
if isinstance(values, dict):
return list(values.keys())
else:
first_el = self.values()[0]
if isinstance(first_el, dict):
indexes = list(first_el.keys())
else:
indexes = range(0, len(self.values()[0]))
self._index = indexes
return indexes
#-----------------------------------------------------------------------------
# Convenience methods
#-----------------------------------------------------------------------------
@staticmethod
def get_index_and_data(values, index=None):
"""Parse values (that must be one of the DataAdapter supported
input types) and create a separate index and data
depending on values type and index.
Args:
values (iterable): container that holds data to be plotted using
on the Chart classes
Returns:
A tuple of (index, values), where: ``index`` is an iterable that
represents the data index and ``values`` is an iterable containing
the values to be plotted.
"""
_values = DataAdapter(values, force_alias=False)
if hasattr(values, 'keys'):
if index is not None:
if isinstance(index, string_types):
xs = _values[index]
else:
xs = index
else:
try:
xs = _values.index
except AttributeError:
xs = values.index
else:
if index is None:
xs = _values.index
elif isinstance(index, string_types):
xs = _values[index]
else:
xs = index
return xs, _values
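# Hedged usage sketch (added comment, not part of the original source):
#
#   import pandas as pd
#   df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
#   xs, values = DataAdapter.get_index_and_data(df, index='x')
#   # xs is the 'x' column; values wraps the full frame. With index=None the
#   # DataFrame's own index would be used instead.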
| bsd-3-clause |
opendatadurban/scoda | scoda/public.py | 1 | 54121 | import itertools
import operator
from sqlalchemy_searchable import search
from scoda.app import app
from flask import request, url_for, redirect, flash, make_response, session, render_template, jsonify, Response, \
send_file
from flask_security import current_user
from itertools import zip_longest
from sqlalchemy.sql import select
from sqlalchemy import func, extract, desc
from .models import db
from .models import *
from .models.user import UserAnalysis
from .models.datasets import ExploreForm
from .models.maps import MapForm, NightFormETH, NightFormJHB
from pandas import read_sql_query
import gviz_api
import geojson, json
import pandas as pd
from .app import csrf
from werkzeug.datastructures import MultiDict
from urllib.parse import urlencode, urlparse, parse_qsl, urlsplit, parse_qs
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
@app.route('/help')
def help():
return render_template('help/help.html')
@app.route('/api/indicators-list/', defaults={'check': ''})
@app.route('/api/indicators-list/<check>', methods=['GET', 'POST'])
def api_indicators_list(check):
remove_list = ['Poverty rate', 'Gini Coefficient', 'Gross Value Add', 'Exports', 'Multiple deprivation index',
'Human Development Index']
if check == "codebook":
indicators_list = [[str(c.id), c.name] for c in CbIndicator.query.join(CbDataPoint,CbDataPoint.indicator_id == CbIndicator.id).all() if c.name not in remove_list]
else:
indicators_list = [[str(c.id), c.in_name] for c in Indicator.all() if c.in_name not in remove_list]
# print(indicators_list)
# payload = {"indicators_list": indicators_list}
return jsonify(indicators_list)
@app.route('/api/explore/', defaults={'check': ''})
@app.route('/api/explore/<check>', methods=['GET', 'POST'])
def api_explore(check):
form = ExploreForm()
status = 200
plot = 0
tour = 1
#ind = 76
#Note: Riaan Snyders: 10 June 2020 - Removed for now. Only functions on GET at the moment.
#if request.method == 'POST':
#if form.validate():
#data_json = request.get_json()
#ind = data_json["indicator_id"]
if request.args.get('indicator_id'):
ind = request.args.get('indicator_id')
else:
ind = 76
print(ind)
plot = 1
tour = 2
# codebook query
if check == "codebook":
query = db.session.query(CbRegion.name.label('re_name'), CbDataPoint.start_dt, CbIndicator.name.label('ds_name'), CbDataPoint.value). \
filter(CbDataPoint.indicator_id == ind).filter(CbDataPoint.indicator_id == CbIndicator.id). \
filter(CbDataPoint.region_id == CbRegion.id)
df = read_sql_query(query.statement, query.session.bind)
df = df.rename(columns={'name': 're_name', 'name.1': 'ds_name'})
df["year"] = df["start_dt"].apply(lambda x: int(x.strftime('%Y')))
df["start_dt"] = df["year"]
else:
query = db.session.query(Region.re_name, DataPoint.year, DataSet.ds_name, DataPoint.value). \
filter(DataPoint.indicator_id == ind).filter(DataPoint.dataset_id == DataSet.id). \
filter(DataPoint.region_id == Region.id)
df = read_sql_query(query.statement, query.session.bind)
df.to_csv('%s/data/%s' % (app.root_path, "data_test.csv"), index=False)
table = []
table_plot = []
years, cities, datasets = [list(df.year.unique()), list(df.re_name.unique()), list(df.ds_name.unique())]
cities = [c for c in cities]
options_list = [{'optid': i, 'optname': d} for i, d in enumerate(datasets, start=1)]
years_list = [{'optid': i, 'optname': 'Year: %s' % d} for i, d in enumerate(sorted(years), start=1)]
plot_type = 1
print(len(years))
if (len(datasets) > 1) or (len(years) == 1):
plot_type = 2
colours = ['#f44336', '#03a9f4', '#4caf50', '#ffc107', '#03a9f4', '#ff5722', '#9c27b0', '#8bc34a',
'#ffeb3b', '#9e9e9e', '#3f51b5', '#e91e63']
series = {i: {'color': colours[i]} for i in range(len(datasets))}
view = list(range(2, len(datasets) + 2))
view.insert(0, 0)
minVal = min(map(float, list(df.value.unique())))
maxVal = max(map(float, list(df.value.unique()))) * 1.1
head = ['City', 'Year']
for i in datasets:
head.append(str(i))
table.append(head)
table_plot.append(head)
# df.re_name = df.re_name.str.encode('utf-8')
if plot_type == 1:
df_i = df.iloc[:, [0, 1, 3]]
schema = [('City', 'string'), ('Year', 'string'), ('%s' % datasets[0], 'number')]
data_table = gviz_api.DataTable(schema)
data_table.LoadData(df_i.values)
table_plot = data_table.ToJSon(columns_order=('City', '%s' % datasets[0], 'Year'))
for c in cities:
for y in years:
row = [str(c), str(y)]
for d in datasets:
datapoint = df.loc[(df["re_name"] == c) & (df["year"] == y) & (df["ds_name"] == d), "value"]
if len(datapoint) == 0:
row.append(None)
else:
row.append(
float(df.loc[(df["re_name"] == c) & (df["year"] == y) & (
df["ds_name"] == d), "value"]))
table.append(row)
else:
for c in cities:
for y in years:
row = [str(c), str(y)]
for d in datasets:
datapoint = df.loc[(df["re_name"] == c) & (df["year"] == y) & (df["ds_name"] == d), "value"]
if len(datapoint) == 0:
row.append(None)
else:
row.append(
float(df.loc[(df["re_name"] == c) & (df["year"] == y) & (
df["ds_name"] == d), "value"]))
table.append(row)
yrs = ['Year'] + [str(y) for y in years[::-1]]
payload = {"plot":plot, "table":table, "table_plot":table_plot,"colours":colours,"year":str(max(years)), "series":series,
"view":view, "plot_type":plot_type,"min":minVal,"max":maxVal, "cities":cities, "options_list":options_list,
"years_list":years_list,"tour":tour, "years":yrs}
return jsonify(payload)
# else:
# form_errors = form.errors
# return {"form_errors":form_errors}
@app.route('/explore', methods=['GET', 'POST'])
def explore():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['explore'] = []
form = ExploreForm()
status = 200
plot = 0
tour = 1
if request.method == 'POST':
if form.validate():
plot = 1
tour = 2
ind = form.indicator_id.data
query = db.session.query(Region.re_name, DataPoint.year, DataSet.ds_name, DataPoint.value). \
filter(DataPoint.indicator_id == ind).filter(DataPoint.dataset_id == DataSet.id). \
filter(DataPoint.region_id == Region.id)
print(query.all())
indicator = Indicator.query.get(ind)
df = read_sql_query(query.statement, query.session.bind)
# df.to_csv('%s/data/%s' % (app.root_path, "data_test.csv"), index=False)
table = []
years, cities, datasets = [list(df.year.unique()), list(df.re_name.unique()), list(df.ds_name.unique())]
cities = [c for c in cities]
options_list = [{'optid': i, 'optname': d} for i, d in enumerate(datasets, start=1)]
years_list = [{'optid': i, 'optname': 'Year: %s' % d} for i, d in enumerate(sorted(years), start=1)]
plot_type = 1
if (len(datasets) > 1) or (len(years) == 1):
plot_type = 2
colours = ['#f44336', '#03a9f4', '#4caf50', '#ffc107', '#03a9f4', '#ff5722', '#9c27b0', '#8bc34a',
'#ffeb3b', '#9e9e9e', '#3f51b5', '#e91e63']
series = {i: {'color': colours[i]} for i in range(len(datasets))}
view = list(range(2, len(datasets) + 2))
view.insert(0, 0)
minVal = min(map(float, list(df.value.unique())))
maxVal = max(map(float, list(df.value.unique()))) * 1.1
head = ['City', 'Year']
for i in datasets:
head.append(str(i))
table.append(head)
print(df)
# df.re_name = df.re_name.str.encode('utf-8')
if plot_type == 1:
df = df.iloc[:, [0, 1, 3]]
schema = [('City', 'string'), ('Year', 'string'), ('%s' % datasets[0], 'number')]
data_table = gviz_api.DataTable(schema)
data_table.LoadData(df.values)
table = data_table.ToJSon(columns_order=('City', '%s' % datasets[0], 'Year'))
else:
for c in cities:
for y in years:
row = [str(c), str(y)]
for d in datasets:
datapoint = df.loc[(df["re_name"] == c) & (df["year"] == y) & (df["ds_name"] == d), "value"]
if len(datapoint) == 0:
row.append(None)
else:
row.append(
float(df.loc[(df["re_name"] == c) & (df["year"] == y) & (
df["ds_name"] == d), "value"]))
table.append(row)
yrs = ['Year'] + [str(y) for y in years[::-1]]
return render_template('explore/explore.html', form=form, plot=plot, table=table, colours=colours,
year=str(max(years)), series=series, view=view, plot_type=plot_type, min=minVal,
max=maxVal, cities=cities, options_list=options_list, years_list=years_list,
tour=tour, indicator=indicator, analyses=analyses, years=yrs)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
return render_template('explore/explore.html', form=form, tour=tour)
if not request.is_xhr:
resp = make_response(
render_template('explore/explore.html', form=form, plot=plot, tour=tour, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/demographics/<region_id>/<city_ward_code>/download', methods=['GET'])
def demographics_download(region_id, city_ward_code):
region = Region.query.get(region_id).re_name
if city_ward_code == 'None':
query = db.session.query(Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == region_id).all()
df = pd.DataFrame()
df['Year'] = range(1996, 2031)
for g in query:
df['%s - Ward %s' % (region, g[1])] = list(g[0])
else:
query = db.session.query(Ward.data, Ward.city_ward_code) \
.filter(Ward.city_ward_code == city_ward_code) \
.filter(Ward.region_id == region_id).all()
df = pd.DataFrame()
df['Year'] = range(1996, 2031)
for g in query:
df['%s - Ward %s' % (region, g[1])] = list(g[0])
return Response(df.to_csv(index=False), mimetype="text/csv",
headers={"Content-disposition": "attachment; filename=demographics.csv"})
@app.route('/demographics', methods=['GET', 'POST'])
def demographics():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['demo'] = []
if 'maps' not in session.keys():
session['maps'] = {0: {}, 1: {}}
form1 = MapForm(prefix='form1', region_id='1', year=1)
print(form1.city_ward_code.choices)
status = 200
tour = 1
geometries1 = {}
forms = [form1]
if request.method == 'POST':
if all(f.validate() for f in forms):
for f, F in enumerate(forms):
for field in F:
if str(field.data) == 'None':
field.data = session['maps'][str(f)][field.name[6:]]
else:
session['maps'][str(f)][field.name[6:]] = field.data
tour = 0
# query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data)
year1 = int(form1.year.data)
year_ind1 = range(1996, 2031)
if form1.city_ward_code.data == '':
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == form1.region_id.data)
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Ward %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == form1.region_id.data).all()
region = db.session.query(Region.re_name).filter(Region.id == form1.region_id.data).first()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s' % str(region[0])]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
else:
query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data, Area.city_ward_code) \
.filter(Area.city_ward_code == form1.city_ward_code.data) \
.filter(Area.region_id == form1.region_id.data)
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Area %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.city_ward_code == form1.city_ward_code.data). \
filter(Ward.region_id == form1.region_id.data).first()
region = db.session.query(Region.re_name).filter(Region.id == form1.region_id.data).first()
region2 = db.session.query(Ward.city_ward_code).filter(Ward.city_ward_code == form1.city_ward_code.data) \
.first()
results = []
for r in query:
row = [val for val in list(r)]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s - Ward %s' % (str(region[0]), str(region2[0]))]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == form1.region_id.data).order_by(
Ward.city_ward_code).distinct()
form1.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
, start=1)]
form1.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics.html', form1=form1, geometries1=geometries1,
table1=table1, tour=tour, max1=m1, region1=form1.region_id.data,
ward1=form1.city_ward_code.data, analyses=analyses)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
session['maps'][0] = {'city_ward_code': '', 'region_id': 1, 'year': 1}
session['maps'][1] = {'city_ward_code': '', 'region_id': 4, 'year': 1}
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][1]),
"flow": round(g[1][1] - g[1][0]),
"name": 'Ward %s' % g[2],
"year": 1997},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 4)
for g in query:
d = json.loads(g[0])
geometries2['features'].append({"type": "Feature", "properties": {"density": round(g[1][1]),
"flow": round(g[1][1] - g[1][0]),
"name": 'Ward %s' % g[2],
"year": 1997},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 4).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table2 = [['Year', 'EThekwini']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table2.append([str(y), val])
m2 = 1.05 * max(df.sum(axis=0).tolist())
return render_template('demographics/demographics.html', form1=form1, geometries1=geometries1,
tour=tour, table1=table1, max1=m, region1=1, ward1=None, ward2=None, analyses=analyses
)
if not request.is_xhr:
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
geometries2['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
resp = make_response(render_template('demographics/demographics.html', form1=form1,
geometries1=geometries1, table1=table1,
tour=tour, max1=m, region1=1,
ward1=None, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/api/demographics', methods=['GET', 'POST'])
@csrf.exempt
def api_demographics():
analyses = []
session['demo'] = []
if 'maps' not in session.keys():
session['maps'] = {0: {}, 1: {}}
form1 = MapForm(prefix='form1', region_id='1', year=1)
geometries1 = {}
if request.method == 'POST':
data = request.get_json()
print(data)
#data = request.data.decode('utf-8')
#object = parse_qs(urlsplit('?' + data).query)
#object = {key: str(value[0]) for key, value in object.items()}
#if 'csrf_token' in object: del object['csrf_token']
#form1 = MapForm(MultiDict(object))
form1 = data
print(form1['year'])
#if form1.validate():
if form1:
tour = 0
# query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data)
#year1 = int(form1.year)
year1 = int(form1['year'])
year_ind1 = range(1996, 2031)
#if form1.city_ward_code.data == '':
if form1['city_ward_code'] == '':
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == form1['region_id'])
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Ward %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == form1['region_id']).all()
region = db.session.query(Region.re_name).filter(Region.id == form1['region_id']).first()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s' % str(region[0])]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
else:
query = db.session.query(Area.geom.ST_AsGeoJSON(), Area.data, Area.city_ward_code) \
.filter(Area.city_ward_code == int(form1['city_ward_code'])) \
.filter(Area.region_id == int(form1['region_id']))
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
if year1 == 0:
flow = 0
else:
flow = round(g[1][year1] - g[1][year1 - 1])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][year1]),
"flow": flow,
"name": 'Area %s' % g[2],
"year": year_ind1[year1]},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.city_ward_code == int(form1['city_ward_code'])). \
filter(Ward.region_id == int(form1['region_id'])).first()
region = db.session.query(Region.re_name).filter(Region.id == int(form1['region_id'])).first()
region2 = db.session.query(Ward.city_ward_code).filter(Ward.city_ward_code == int(form1['city_ward_code'])) \
.first()
results = []
for r in query:
row = [val for val in list(r)]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', '%s - Ward %s' % (str(region[0]), str(region2[0]))]]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m1 = 1.05 * max(df.sum(axis=0).tolist())
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == int(form1['region_id'])).order_by(
Ward.city_ward_code).distinct()
#form1.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
#, start=1)]
#form1.city_ward_code.choices.insert(0, ('', 'View All'))
resp = jsonify({'success': True, 'geometries1': geometries1,'table1':table1,
'tour':tour, 'max1':m1, 'region1':form1['region_id'],'ward1':form1['city_ward_code']})
resp.status_code = 200
return resp
else:
message = 'Please correct the problems below and try again.'
resp = jsonify(message=message)
resp.status_code = 500
return resp
else:
session['maps'][0] = {'city_ward_code': '', 'region_id': 1, 'year': 1}
session['maps'][1] = {'city_ward_code': '', 'region_id': 4, 'year': 1}
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append({"type": "Feature", "properties": {"density": round(g[1][1]),
"flow": round(g[1][1] - g[1][0]),
"name": 'Ward %s' % g[2],
"year": 1997},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
resp = jsonify({'success': True, 'table1': table1,
'max1': m, 'region1': 1, 'ward1': None,'ward2':None, 'geometries1': geometries1,
'form_year':form1.year.choices,'form_ward':form1.city_ward_code.choices,'form_city':form1.region_id.choices})
resp.status_code = 200
return resp
@app.route('/nightlights_jhb', methods=['GET', 'POST'])
def demographics_night_jhb():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['night'] = []
form = NightFormJHB()
status = 200
tour = 1
if request.method == 'POST':
if form.validate():
tour = 0
if form.city_ward_code.data == '':
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 1)
geometries = {"type": "FeatureCollection",
"features": []}
bias_ind = [x / 10.0 for x in range(5, 21, 1)].index(float(form.grid_bias.data))
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][bias_ind] - g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 1).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in
enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night.html', form=form, geometries=geometries,
bias_val=form.grid_bias.data)
else:
w = db.session.query(Ward.id).filter(Ward.city_ward_code == form.city_ward_code.data)\
.filter(Ward.region_id == 1).first()
w = Ward.query.get(w[0])
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference) \
.filter(Grid.geom.intersects(w.geom))
geometries = {"type": "FeatureCollection",
"features": []}
bias_ind = [x / 10.0 for x in range(5, 21, 1)].index(float(form.grid_bias.data))
for g in query:
d = json.loads(g[0])
geometries['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][bias_ind] - g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code)\
.filter(Ward.city_ward_code == form.city_ward_code.data).filter(Ward.region_id == 1)
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries2['features'].append(
{"type": "Feature", "properties": {"density": 0,
"name": 'Ward %s' % form.city_ward_code.data,
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 1).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night.html', form=form, geometries=geometries,
bias_val=form.grid_bias.data, geometries2=geometries2, ward=form.city_ward_code.data)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 1)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": g[1][0] - g[3],
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
return render_template('demographics/demographics_night.html', form=form, bias_val=0.5, geometries=geometries,
analyses=analyses)
if not request.is_xhr:
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
geometries2['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
resp = make_response(render_template('demographics/demographics.html', form1=form1, form2=form2,
geometries1=geometries1, geometries2=geometries2, table1=table1,
table2=table1, tour=tour, max1=m, max2=m, region1=1, region2=1,
ward1=None, ward2=None, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/nightlights_eth', methods=['GET', 'POST'])
def demographics_night_eth():
analyses = []
if current_user.is_authenticated:
query = db.session.query(UserAnalysis.id, UserAnalysis.ds_name, UserAnalysis.description) \
.filter(UserAnalysis.user_id == current_user.id).order_by(UserAnalysis.id.desc())
analyses = []
for i in grouper(query, 4):
analyses.append(i)
session['night'] = []
form = NightFormETH()
status = 200
tour = 1
if request.method == 'POST':
if form.validate():
tour = 0
if form.city_ward_code.data == '':
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 4)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][0]-g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 4).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in
enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night_ETH.html', form=form, geometries=geometries)
else:
w = db.session.query(Ward.id).filter(Ward.city_ward_code == form.city_ward_code.data)\
.filter(Ward.region_id == 4).first()
w = Ward.query.get(w[0])
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference) \
.filter(Grid.geom.intersects(w.geom)).filter(Grid.region_id == 4)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]-g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code)\
.filter(Ward.city_ward_code == form.city_ward_code.data).filter(Ward.region_id == 4)
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries2['features'].append(
{"type": "Feature", "properties": {"density": 0,
"name": 'Ward %s' % form.city_ward_code.data,
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.city_ward_code).filter(Ward.region_id == 4).order_by(
Ward.city_ward_code).distinct()
form.city_ward_code.choices = [[str(i), 'Ward %s' % row.city_ward_code] for i, row in enumerate(query.all()
, start=1)]
form.city_ward_code.choices.insert(0, ('', 'View All'))
return render_template('demographics/demographics_night_ETH.html', form=form, geometries=geometries,
geometries2=geometries2, ward=form.city_ward_code.data)
else:
if request.is_xhr:
status = 412
else:
flash('Please correct the problems below and try again.', 'warning')
else:
query = db.session.query(Grid.geom.ST_AsGeoJSON(), Grid.data, Grid.city_grid_id, Grid.reference). \
filter(Grid.region_id == 4)
geometries = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries['features'].append({"type": "Feature", "properties": {"density": round(g[1][0]-g[3]),
"name": 'Grid %s' % g[2],
"year": 2016},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
return render_template('demographics/demographics_night_ETH.html', form=form, geometries=geometries,
analyses=analyses)
if not request.is_xhr:
query = db.session.query(Ward.geom.ST_AsGeoJSON(), Ward.data, Ward.city_ward_code). \
filter(Ward.region_id == 1)
geometries1 = {"type": "FeatureCollection",
"features": []}
geometries2 = {"type": "FeatureCollection",
"features": []}
for g in query:
d = json.loads(g[0])
geometries1['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
geometries2['features'].append(
{"type": "Feature", "properties": {"density": round(g[1][0]), "flow": 0, "name": g[2],
"year": 1996},
"geometry": {"type": "Polygon", "coordinates": d['coordinates']}})
query = db.session.query(Ward.data).filter(Ward.region_id == 1).all()
results = []
for r in query:
row = [val for val in list(r)[0]]
results.append(row)
df = pd.DataFrame(results).fillna(value=0)
table1 = [['Year', 'Johannesburg']]
for y, val in zip(range(1996, 2031), df.sum(axis=0).tolist()):
table1.append([str(y), val])
m = 1.05 * max(df.sum(axis=0).tolist())
resp = make_response(render_template('demographics/demographics.html', form1=form1, form2=form2,
geometries1=geometries1, geometries2=geometries2, table1=table1,
table2=table1, tour=tour, max1=m, max2=m, region1=1, region2=1,
ward1=None, ward2=None, analyses=analyses))
else:
resp = ''
return (resp, status,
# ensure the browser refreshes the page when Back is pressed
{'Cache-Control': 'no-cache, no-store, must-revalidate'})
@app.route('/return-land/')
def land_gen():
return send_file('data/Ethekwini_Region_Data.xlsx', as_attachment=True)
@app.route('/_parse_data', methods=['GET'])
def parse_data():
kwargs = {}
for i in ['dataset_id', 'indicator_id', 'region_id', 'type_id', 'theme_id', 'year']:
param = request.args.get(i)
if (i == 'year'):
if (str(param) != 'Empty') and (param is not None) and (str(param) != ''):
kwargs[i] = int(param)
else:
pass
elif (param is not None) and (str(param) != ''):
kwargs[i] = param
session['explore'] = [i for i in kwargs]
datasets = db.session.query(DataPoint.dataset_id).filter_by(**kwargs).distinct()
indicators = db.session.query(DataPoint.indicator_id).filter_by(**kwargs).distinct()
regions = db.session.query(DataPoint.region_id).filter_by(**kwargs).distinct()
types = db.session.query(DataPoint.type_id).filter_by(**kwargs).distinct()
themes = db.session.query(DataPoint.theme_id).filter_by(**kwargs).distinct()
years = db.session.query(DataPoint.year).filter_by(**kwargs).distinct()
response = {}
remove_list = ['Poverty rate', 'Gini Coefficient', 'Gross Value Add', 'Exports', 'Multiple deprivation index',
'Human Development Index']
dataset_list = [(i[0], str(DataSet.query.filter_by(id=i).first().ds_name)) for i in datasets if
str(DataSet.query.filter_by(id=i).first().ds_name) not in remove_list]
if 'dataset_id' not in session['explore']:
dataset_list.insert(0, ('', 'Empty'))
else:
dataset_list.insert(1, ('', 'Empty'))
response['dataset'] = dataset_list
indicator_list = [[i[0], str(Indicator.query.filter_by(id=i).first().in_name)] for i in indicators if
str(Indicator.query.filter_by(id=i).first().in_name) not in remove_list]
if 'indicator_id' not in session['explore']:
indicator_list.insert(0, ('', 'Empty'))
response['ind_ready'] = 0
else:
indicator_list.insert(1, ('', 'Empty'))
response['ind_ready'] = 1
response['indicator'] = indicator_list
region_list = [(i[0], str(Region.query.filter_by(id=i).first().re_name)) for i in regions]
if 'region_id' not in session['explore']:
region_list.insert(0, ('', 'Empty'))
else:
region_list.insert(1, ('', 'Empty'))
response['region'] = region_list
type_list = [(i[0], str(Type.query.filter_by(id=i).first().ty_name)) for i in types]
if 'type_id' not in session['explore']:
type_list.insert(0, ('', 'Empty'))
else:
type_list.insert(1, ('', 'Empty'))
response['type'] = type_list
theme_list = [(i[0], str(Theme.query.filter_by(id=i).first().th_name)) for i in themes]
if 'theme_id' not in session['explore']:
theme_list.insert(0, ('', 'Empty'))
else:
theme_list.insert(1, ('', 'Empty'))
response['theme'] = theme_list
year_list = [(str(i), str(y[0])) for i, y in enumerate(sorted(years))]
if 'year' not in session['explore']:
year_list.insert(0, ('', 'Empty'))
else:
year_list.insert(1, ('', 'Empty'))
response['year'] = year_list
return jsonify(response)
@app.route('/_parse_demo', methods=['GET'])
def parse_demo():
kwargs = {}
for i in ['region_id', 'ward_id']:
param = request.args.get(i)
if (param is not None) and (str(param) != ''):
kwargs[i] = param
session['demo'] = [i for i in kwargs]
wards = db.session.query(Ward.city_ward_code).filter_by(**kwargs).distinct().order_by(Ward.city_ward_code)
response = {}
ward_list = [(str(i[0]), 'Ward %s' % Ward.query.filter_by(id=i).first().city_ward_code) for i in wards]
if 'ward_id' not in session['demo']:
ward_list.insert(0, ('', 'View All'))
else:
ward_list.insert(1, ('', 'View All'))
response['wards'] = ward_list
return jsonify(response)
@app.route('/api/codebook', methods=['GET', 'POST'])
@app.route('/api/codebook/<int:page>', methods=['GET', 'POST'])
@csrf.exempt
def api_codebook(page=1):
query = db.session.query(CbIndicator). \
outerjoin(CbTheme, CbTheme.id == CbIndicator.theme_id). \
outerjoin(CbSource, CbSource.id == CbIndicator.source_id). \
outerjoin(CbUnit, CbUnit.id == CbIndicator.unit_id)
if request.method == 'POST':
data = request.get_json()
if data['c88']:
query = query.filter(CbIndicator.c88_theme.in_(data['c88']))
if data['socr']:
query = query.filter(CbIndicator.socr_theme.in_(data['socr']))
if data['sdg']:
query = query.filter(CbIndicator.sdg_theme.in_(data['sdg']))
if data['search']:
query = search(query, data['search'], sort=True)
else:
query = query.limit(150).offset((page - 1) * 20)
row_count = query.count()
query = query.all()
# query.sort(key=lambda x: x.code)
result_list = [row_count]
for day, dicts_for_group_code in itertools.groupby(query, key=lambda x:x.group_code):
dicts_for_group_code = list(dicts_for_group_code)
day_dict = {
"id": str(dicts_for_group_code[0].id),
"varCode": dicts_for_group_code[0].code,
"groupCode": dicts_for_group_code[0].group_code,
"indicator": dicts_for_group_code[0].name,
"c88": dicts_for_group_code[0].c88_theme,
"socr": dicts_for_group_code[0].socr_theme,
"sdg": dicts_for_group_code[0].sdg_theme,
"definition": dicts_for_group_code[0].definition,
"source": dicts_for_group_code[0].source.name if dicts_for_group_code[0].source else None,
"reportingResponsibility": dicts_for_group_code[0].reporting_responsibility,
"notesOnCalculation": dicts_for_group_code[0].notes_on_calculation,
"variableType": dicts_for_group_code[0].unit.name,
"frequencyOfCollection": dicts_for_group_code[0].frequency_of_collection,
"automatibility": dicts_for_group_code[0].automatable,
"granulity": dicts_for_group_code[0].granularity,
"gathering_method": dicts_for_group_code[0].gathering_method,
"expandability": dicts_for_group_code[0].expandable,
"period": dicts_for_group_code[0].period,
"unit_of_measurement": dicts_for_group_code[0].unit.name,
"source_link": dicts_for_group_code[0].url_link,
"data_check":True if dicts_for_group_code[0].indicator_data else False
}
children = []
dicts_for_group_code.pop(0)
for d in dicts_for_group_code:
child = {
"id": str(d.id),
"varCode": d.code,
"groupCode": d.group_code,
"indicator": d.name,
"c88": d.c88_theme,
"socr": d.socr_theme,
"sdg": d.sdg_theme,
"definition": d.definition,
"source": d.source.name if d.source else None,
"reportingResponsibility": d.reporting_responsibility,
"notesOnCalculation": d.notes_on_calculation,
"variableType": d.unit.name,
"frequencyOfCollection": d.frequency_of_collection,
"automatibility": d.automatable,
"granulity": d.granularity,
"gathering_method": d.gathering_method,
"expandability": d.expandable,
"period": d.period,
"unit_of_measurement": d.unit.name,
"source_link": d.url_link,
"data_check": bool(d.indicator_data),
}
children.append(child)
day_dict.update({"children": children})
result_list.append(day_dict)
return jsonify(result_list)
| apache-2.0 |
mwindau/praktikum | v351/dreieck.py | 1 | 1188 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from scipy.optimize import curve_fit
oberwelle3, amplitude3 = np.genfromtxt('Rohdaten/dreieckspannung.txt',unpack=True)
plt.plot(oberwelle3, amplitude3,'k.',label="Messdaten")
#plt.legend(loc='best')
plt.grid()
#plt.xlim(0,1.5)
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'Oberwellen')
plt.ylabel(r'$\mathrm{U}/V$')
plt.tight_layout()
#plt.show()
#plt.savefig('build/50ohm.pdf')
####################
newX = np.logspace(-4, 1, base=10) # Makes a nice domain for the fitted curves.
# This avoids the sorting and the swarm of lines.
# Let's fit an exponential function.
# This looks like a line on a log-log plot.
def myExpFunc(x, a, b):
return a * np.power(x, b)
popt, pcov = curve_fit(myExpFunc, oberwelle3, amplitude3)
plt.plot(newX, myExpFunc(newX, *popt), 'r-',
label="Fit".format(*popt))
plt.xlim(10**-2, 10**1)
plt.ylim(10**-0.5, 10**1)
print('Exponential Fit: y = (a*(x**b))')
print('\ta = popt[0] = {0}\n\tb = popt[1] = {1}'.format(*popt))
####################
plt.legend(loc='best')
#plt.show()
plt.savefig('build/dreieckspannung.pdf')
| mit |
dhruv13J/scikit-learn | examples/svm/plot_iris.py | 62 | 3251 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
   more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
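# As described in the docstring, LinearSVC and SVC(kernel='linear') optimise slightly
# different objectives. For reference (an illustrative note -- the exact defaults have
# shifted across scikit-learn releases), the models above roughly correspond to:
# svm.LinearSVC(C=C, loss='squared_hinge', multi_class='ovr')  # one-vs-rest, squared hinge
# svm.SVC(kernel='linear', C=C)                                # one-vs-one multiclass reduction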
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
ilo10/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
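# Quick numerical check of the bound quoted in the docstring (an illustrative addition,
# not part of the original example): the closed-form expression and the library helper
# should agree up to integer truncation.
eps_check = 0.1
n_samples_check = 1e6
bound_check = 4 * np.log(n_samples_check) / (eps_check ** 2 / 2 - eps_check ** 3 / 3)
print("JL bound for n_samples=%g, eps=%.1f: closed form ~%d, helper %d"
      % (n_samples_check, eps_check, int(bound_check),
         johnson_lindenstrauss_min_dim(n_samples=int(n_samples_check), eps=eps_check)))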
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
imatge-upc/saliency-2016-cvpr | shallow/train.py | 2 | 3064 | # add to kfkd.py
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet,BatchIterator
import os
import numpy as np
from sklearn.utils import shuffle
import cPickle as pickle
import matplotlib.pyplot as plt
import Image
import ImageOps
from scipy import misc
import scipy.io
import theano
def load():
f = file('data_Salicon_T.cPickle', 'rb')
loaded_obj = pickle.load(f)
f.close()
X, y = loaded_obj
return X, y
def float32(k):
return np.cast['float32'](k)
class AdjustVariable(object):
def __init__(self, name, start=0.03, stop=0.001):
self.name = name
self.start, self.stop = start, stop
self.ls = None
def __call__(self, nn, train_history):
if self.ls is None:
self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
epoch = train_history[-1]['epoch']
new_value = float32(self.ls[epoch - 1])
getattr(nn, self.name).set_value(new_value)
class FlipBatchIterator(BatchIterator):
def transform(self, Xb, yb):
Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)
# Flip half of the images in this batch at random:
bs = Xb.shape[0]
indices = np.random.choice(bs, bs / 2, replace=False)
Xb[indices] = Xb[indices, :, :, ::-1]
tmp = yb[indices].reshape(bs/2,1,48,48)
mirror = tmp[ :,:,:, ::-1]
yb[indices] = mirror.reshape(bs/2,48*48)
return Xb, yb
net2 = NeuralNet(
layers=[
('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('conv3', layers.Conv2DLayer),
('pool3', layers.MaxPool2DLayer),
('hidden4', layers.DenseLayer),
('maxout6',layers.FeaturePoolLayer),
('output', layers.DenseLayer),
],
input_shape=(None, 3, 96, 96),
conv1_num_filters=32, conv1_filter_size=(5, 5), pool1_pool_size=(2, 2),
conv2_num_filters=64, conv2_filter_size=(3, 3), pool2_pool_size=(2, 2),
conv3_num_filters=64, conv3_filter_size=(3, 3), pool3_pool_size=(2, 2),
hidden4_num_units=48*48*2,
maxout6_pool_size=2,output_num_units=48*48,output_nonlinearity=None,
update_learning_rate=theano.shared(float32(0.05)),
update_momentum=theano.shared(float32(0.9)),
regression=True,
on_epoch_finished=[
AdjustVariable('update_learning_rate', start=0.05, stop=0.0001),
AdjustVariable('update_momentum', start=0.9, stop=0.999),
],
batch_iterator_train=FlipBatchIterator(batch_size=128),
max_epochs=1200,
verbose=1,
)
X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
y.shape, y.min(), y.max()))
X = X.astype(np.float32)
y = y.astype(np.float32)
net2.fit(X, y)
with open('JuntingNet_SALICON.pickle', 'wb') as f:
pickle.dump(net2, f, -1) | mit |
chankeypathak/pandas-matplotlib-examples | Lesson 9/export.py | 1 | 1179 | import pandas as pd
from sqlalchemy import create_engine, MetaData, Table, select
# Parameters
TableName = "data"
DB = {
'drivername': 'mssql+pyodbc',
'servername': 'DAVID-THINK',
#'port': '5432',
#'username': 'lynn',
#'password': '',
'database': 'BizIntel',
'driver': 'SQL Server Native Client 11.0',
'trusted_connection': 'yes',
'legacy_schema_aliasing': False
}
# Create the connection
engine = create_engine(DB['drivername'] + '://' + DB['servername'] + '/' + DB['database'] + '?' + 'driver=' + DB['driver'] + ';' + 'trusted_connection=' + DB['trusted_connection'], legacy_schema_aliasing=DB['legacy_schema_aliasing'])
conn = engine.connect()
# Required for querying tables
metadata = MetaData(conn)
# Table to query
tbl = Table(TableName, metadata, autoload=True, schema="dbo")
#tbl.create(checkfirst=True)
# Select all
sql = tbl.select()
# run sql code
result = conn.execute(sql)
# Insert to a dataframe
df = pd.DataFrame(data=list(result), columns=result.keys())
# Close connection
conn.close()
print('Done')
df.to_csv('DimDate.csv', index=False)
df.to_excel('DimDate.xls', index=False)
df.to_csv('DimDate.txt', index=False)
| mit |
gpersistence/tstop | python/persistence/PartitionData.py | 1 | 8153 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import os
import sys
import argparse
from math import ceil
import numpy
from sklearn.cross_validation import StratifiedKFold
from Datatypes.JSONObject import load_data, save_data
from Datatypes.Segments import SegmentInfo
from Datatypes.Configuration import Configuration
from Datatypes.TrainTestPartitions import TrainTestPartition, TrainTestPartitions
def PartitionData(segment_info, split, avoid_overlap=False, segment_size=0,
file_based=False, preserve_labels=False, override_preset=False, surpress_warning=False, seed=None) :
'''
    Accepts a list of Datatypes.Segments.SegmentInfo and a float between 0 and 1,
    and outputs a pair of lists of indices, (train, test), corresponding
    to a partition of the input list.
    len(train) approximates split * len(segment_info).
    Intersection of train and test is an empty set.
    Union of train and test is not guaranteed to be range(len(segment_info)).
    Optional arguments:
    avoid_overlap omits entries in test that would have overlapping data with entries in train,
    as indicated by the range [segment_start:segment_start+segment_size]
    segment_size interacts with avoid_overlap, because only segment_start is contained in the
    SegmentInfo class
    file_based creates partitions where segments with the same filename for source data are
    in the same partition
    preserve_labels tries to split the populations of labels evenly
'''
segment_count = len(segment_info)
segment_range = range(segment_count)
# check to see if we have a preset train / test split for all data and we aren't overriding that
if not override_preset and [0 for s in segment_info if s.learning == None] == [] :
return TrainTestPartition([i for i in segment_range if segment_info[i].learning == 'train'],
[i for i in segment_range if segment_info[i].learning == 'test'], None)
train_goal_len = int(ceil(segment_count * split))
if preserve_labels :
labels = [s.max_label() for s in segment_info]
label_set = list(set(labels))
label_count = [(l0,len([l for l in labels if l == l0])) for l0 in label_set]
label_goal = [(str(l), int(round(c * split))) for (l,c) in label_count]
for ((l0,g),(l1,c)) in zip(label_goal, label_count) :
            if ((g == 0) or (g == c)) and not surpress_warning:
print "PartitionData warning: not enough entries (%d) of label %s to properly make a train / test split of ratio %s" % (c, l0, split)
label_goal = dict(label_goal)
train = []
test = []
if seed != None :
random.seed(seed)
state = random.getstate()
if file_based :
files = list(set([s.filename for s in segment_info]))
random.shuffle(files)
for f in files :
f_indices = [x for (x,y) in zip(segment_range, segment_info) if y.filename == f]
if preserve_labels :
f_labels = [str(labels[i]) for i in f_indices]
extend_train = True
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
if count > label_goal[l] :
extend_train = False
break
if extend_train :
train.extend(f_indices)
for l in label_goal.keys() :
count = len([l0 for l0 in f_labels if l0 == l])
label_goal[l] = label_goal[l] - count
else :
test.extend(f_indices)
else :
if len(train) + len(f_indices) < train_goal_len :
train.extend(f_indices)
else :
test.extend(f_indices)
else :
random.shuffle(segment_range)
if preserve_labels :
for i in segment_range:
l = str(labels[i])
if label_goal[l] > 0 :
train.append(i)
label_goal[l] = label_goal[l] - 1
else :
test.append(i)
else :
train = segment_range[0:train_goal_len]
test = segment_range[train_goal_len:]
return TrainTestPartition(train,test,state)
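# Illustrative sketch only (not part of the original pipeline): _FakeSegment is a
# made-up stand-in for Datatypes.Segments.SegmentInfo, just to show the call shape.
# With no preset 'learning' assignment and the default flags, PartitionData returns
# disjoint index lists whose sizes follow the requested split.
def _example_partition_call():
    class _FakeSegment(object):
        def __init__(self, filename):
            self.filename = filename
            self.learning = None  # no preset train/test assignment
    segments = [_FakeSegment("recording_%d" % i) for i in range(10)]
    partition = PartitionData(segments, 0.8, seed=42)
    # partition.train holds 8 shuffled indices and partition.test the remaining 2
    return partition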
def generate_partitions(config, segment_info, cv_iterations=0, seed=None) :
partition = PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=seed)
all_labels = [segment_info[i].max_label() for i in partition.train]
if cv_iterations > 0 :
skf = StratifiedKFold(all_labels, n_folds=cv_iterations)
cross_validation = [TrainTestPartition([partition.train[i] for i in train_index],
[partition.train[i] for i in test_index], None) \
for train_index, test_index in skf]
else :
cross_validation = None
learning_trials = [PartitionData(segment_info,
config.learning_split,
avoid_overlap=True,
segment_size=config.segment_size,
file_based=True if (config.data_type == "BirdSoundsSegments" or
config.data_type == "KitchenMocapSegments") \
else False,
preserve_labels=True,
seed=None) for i in range(config.learning_iterations)]
return TrainTestPartitions(config, segment_info, cross_validation, learning_trials)
if __name__ == "__main__" :
parser = argparse.ArgumentParser("Tool to generate train / test splits for testing and cross validation")
parser.add_argument("--segments", "-i")
parser.add_argument("--outfile", "-o")
parser.add_argument("--learning-split", "-s", type=float)
parser.add_argument("--learning-iterations", "-I", type=int)
parser.add_argument("--cv-iterations", "-v", default=5, type=int)
parser.add_argument("--seed", "-S")
args = parser.parse_args(sys.argv[1:])
segments_json = load_data(args.segments, 'segments', None, None, sys.argv[0] + " : ")
if segments_json == None :
print "Could not load Segments from %s" % (args.segments,)
sys.exit(1)
segment_info = [SegmentInfo.fromJSONDict(s) for s in segments_json['segments']]
config = Configuration.fromJSONDict(segments_json['config'])
if args.learning_split != None :
config.learning_split = args.learning_split
if args.learning_iterations != None :
config.learning_iterations = args.learning_iterations
output = generate_partitions(config, segment_info, cv_iterations=args.cv_iterations, seed=args.seed)
if args.outfile == None :
args.outfile = TrainTestPartitions.get_partition_filename(config)
print "Writing %s" % (args.outfile,)
save_data(args.outfile, output.toJSONDict())
| gpl-3.0 |
noelevans/sandpit | kaggle/washington_bike_share/knn_normalising.py | 1 | 3166 | import datetime
import logging
import math
import random
import numpy as np
import pandas as pd
logging.basicConfig(level=logging.INFO)
INPUT_FIELDS = ('holiday', 'workingday', 'temp', 'atemp', 'humidity',
'windspeed', 'hour', 'day_of_year', 'day_of_week')
PERIODICS = ('hour', 'day_of_year', 'day_of_week')
RESULT_FIELD = 'count'
def normalise(df, normalise=[]):
mins = dict((field, min(df[field])) for field in normalise)
maxes = dict((field, max(df[field])) for field in normalise)
for field in normalise:
f = lambda x: float(x - mins[field]) / (maxes[field] - mins[field])
df[field] = map(f, df[field])
return df
def load_and_munge_training_data(filename):
parse_hour = lambda dt: int(dt.split()[1].split(':')[0])
parse_day_of_week = lambda dt: parse_date(dt).weekday()
def parse_date(dt):
return datetime.date(*(int(x) for x in dt.split()[0].split('-')))
def parse_day_of_year(dt):
_date = parse_date(dt)
year_start = datetime.date(_date.year, 1, 1)
return (_date - year_start).days
df = pd.read_csv(open(filename))
df['hour'] = map(parse_hour, df['datetime'])
df['day_of_year'] = map(parse_day_of_year, df['datetime'])
df['day_of_week'] = map(parse_day_of_week, df['datetime'])
return normalise(df, INPUT_FIELDS)
def euclidean_dist(a, b):
    # wrap-around difference for normalised periodic fields (e.g. hour 23 vs hour 0)
    diff = lambda m, n, field: min(abs(m - n) % 1, 1 - abs(m - n) % 1) if field in PERIODICS else m - n
return math.sqrt(sum((diff(a[f], b[f], f)**2 for f in INPUT_FIELDS)))
def shuffle(df):
length = len(df)
chosen_indices = random.sample(range(length), length)
return df.irow(chosen_indices)
def most_influential(training, fields):
def homogeneity(field):
return training.groupby(field)[RESULT_FIELD].apply(np.std).sum()
return sorted((homogeneity(f), f) for f in fields)[0][1]
def knn(vector, neighbours, k=3):
ds = [(euclidean_dist(vector, n), n) for _, n in neighbours.iterrows()]
return sorted(ds, key=lambda a: a[0])[:k]
def gaussian_weight(dist, sigma=12.0):
return math.exp(-dist**2/(2*sigma**2))
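# Quick numeric illustration (values assumed, not from the original script): with the
# default sigma of 12.0 a neighbour at distance 0 keeps full weight while one at
# distance 12 is down-weighted to exp(-0.5) ~= 0.61, so the weighted mean computed in
# estimate() below is dominated by the closest neighbours.
def _demo_gaussian_weight():
    return gaussian_weight(0.0), gaussian_weight(12.0)  # (1.0, ~0.6065)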
def estimate(test, training):
neighbour_dists = knn(test, training)
weights = [(gaussian_weight(d), n) for d, n in neighbour_dists]
sum_weights = sum(w for w, _ in weights)
mean = sum(w * n[RESULT_FIELD] for w, n in weights) / sum_weights
return int(mean)
def main():
dry_run = False
all_train = load_and_munge_training_data('train.csv')
if dry_run:
train_on = 0.6
all_train = shuffle(all_train)
split = int(train_on * len(all_train))
train = all_train[: split]
test = all_train[split+1:]
else:
train = all_train
test = load_and_munge_training_data('test.csv')
filename = 'knn-normalising.csv'
with open(filename, 'w') as f:
f.write('datetime,count\n')
for n, t in test.iterrows():
f.write('%s,%i\n' % (t['datetime'], estimate(t, train)))
print '%s,%i' % (t['datetime'], estimate(t, train))
if __name__ == '__main__':
main()
| mit |
wangmiao1981/spark | python/pyspark/pandas/tests/test_stats.py | 6 | 18881 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
try:
from pandas._testing import makeMissingDataframe
except ImportError:
from pandas.util.testing import makeMissingDataframe
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
class StatsTest(PandasOnSparkTestCase, SQLTestUtils):
def _test_stat_functions(self, pdf_or_pser, psdf_or_psser):
functions = ["max", "min", "mean", "sum", "count"]
for funcname in functions:
self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)())
functions = ["std", "var", "product", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(),
getattr(pdf_or_pser, funcname)(),
check_exact=False,
)
functions = ["std", "var", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(ddof=0),
getattr(pdf_or_pser, funcname)(ddof=0),
check_exact=False,
)
# NOTE: To test skew, kurt, and median, just make sure they run.
# The numbers are different in spark and pandas.
functions = ["skew", "kurt", "median"]
for funcname in functions:
getattr(psdf_or_psser, funcname)()
def test_stat_functions(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]})
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
# empty
self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]])
self._test_stat_functions(pdf.loc[[]], psdf.loc[[]])
def test_stat_functions_multiindex_column(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
def test_stat_functions_with_no_numeric_columns(self):
pdf = pd.DataFrame(
{
"A": ["a", None, "c", "d", None, "f", "g"],
"B": ["A", "B", "C", None, "E", "F", None],
}
)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf, psdf)
def test_sum(self):
pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3))
self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum())
self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1))
self.assert_eq(psdf["a"].sum(), pdf["a"].sum())
self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3))
self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3))
self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum())
self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1))
def test_product(self):
pdf = pd.DataFrame(
{"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.product(), pdf.product(), check_exact=False)
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False)
self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product())
self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1))
self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False)
self.assert_eq(
psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False
)
self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3))
self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3))
self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product())
self.assert_eq(
psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1)
)
def test_abs(self):
pdf = pd.DataFrame(
{
"A": [1, -2, np.nan, -4, 5],
"B": [1.0, -2, np.nan, -4, 5],
"C": [-6.0, -7, -8, np.nan, 10],
"D": ["a", "b", "c", "d", np.nan],
"E": [True, np.nan, False, True, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.A.abs(), pdf.A.abs())
self.assert_eq(psdf.B.abs(), pdf.B.abs())
self.assert_eq(psdf.E.abs(), pdf.E.abs())
# pandas' bug?
# self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs())
self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs())
self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs())
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.abs()
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.D.abs()
def test_axis_on_dataframe(self):
# The number of each count is intentionally big
# because when data is small, it executes a shortcut.
# Less than 'compute.shortcut_limit' will execute a shortcut
# by using collected pandas dataframe directly.
# now we set the 'compute.shortcut_limit' as 1000 explicitly
with option_context("compute.shortcut_limit", 1000):
pdf = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5] * 300,
"B": [1.0, -2, 3, -4, 5] * 300,
"C": [-6.0, -7, -8, -9, 10] * 300,
"D": [True, False, True, False, False] * 300,
},
index=range(10, 15001, 10),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.count(axis=1), pdf.count(axis=1))
self.assert_eq(psdf.var(axis=1), pdf.var(axis=1))
self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0))
self.assert_eq(psdf.std(axis=1), pdf.std(axis=1))
self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0))
self.assert_eq(psdf.max(axis=1), pdf.max(axis=1))
self.assert_eq(psdf.min(axis=1), pdf.min(axis=1))
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1))
self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1))
self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1))
self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1))
self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0))
self.assert_eq(
psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True)
)
self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True))
self.assert_eq(
psdf.var(axis=1, ddof=0, numeric_only=True),
pdf.var(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True))
self.assert_eq(
psdf.std(axis=1, ddof=0, numeric_only=True),
pdf.std(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(
psdf.max(axis=1, numeric_only=True),
pdf.max(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.min(axis=1, numeric_only=True),
pdf.min(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.sum(axis=1, numeric_only=True),
pdf.sum(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.product(axis=1, numeric_only=True),
pdf.product(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True)
)
self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True))
self.assert_eq(
psdf.sem(axis=1, ddof=0, numeric_only=True),
pdf.sem(axis=1, ddof=0, numeric_only=True),
)
def test_corr(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# DataFrame
# we do not handle NaNs for now
pdf = makeMissingDataframe(0.3, 42).fillna(0)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_a = pdf.A
pser_b = pdf.B
psser_a = psdf.A
psser_b = psdf.B
self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b))
self.assertRaises(TypeError, lambda: psser_a.corr(psdf))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_xa = pdf[("X", "A")]
pser_xb = pdf[("X", "B")]
psser_xa = psdf[("X", "A")]
psser_xb = psdf[("X", "B")]
self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True)
def test_cov_corr_meta(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
pdf = pd.DataFrame(
{
"a": np.array([1, 2, 3], dtype="i1"),
"b": np.array([1, 2, 3], dtype="i2"),
"c": np.array([1, 2, 3], dtype="i4"),
"d": np.array([1, 2, 3]),
"e": np.array([1.0, 2.0, 3.0], dtype="f4"),
"f": np.array([1.0, 2.0, 3.0]),
"g": np.array([True, False, True]),
"h": np.array(list("abc")),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr())
def test_stats_on_boolean_dataframe(self):
pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.min(), pdf.min())
self.assert_eq(psdf.max(), pdf.max())
self.assert_eq(psdf.count(), pdf.count())
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.product(), pdf.product())
self.assert_eq(psdf.mean(), pdf.mean())
self.assert_eq(psdf.var(), pdf.var(), check_exact=False)
self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False)
self.assert_eq(psdf.std(), pdf.std(), check_exact=False)
self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False)
self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False)
self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False)
def test_stats_on_boolean_series(self):
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(psser.min(), pser.min())
self.assert_eq(psser.max(), pser.max())
self.assert_eq(psser.count(), pser.count())
self.assert_eq(psser.sum(), pser.sum())
self.assert_eq(psser.product(), pser.product())
self.assert_eq(psser.mean(), pser.mean())
self.assert_eq(psser.var(), pser.var(), almost=True)
self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True)
self.assert_eq(psser.std(), pser.std(), almost=True)
self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True)
self.assert_eq(psser.sem(), pser.sem(), almost=True)
self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True)
def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True)
)
self.assert_eq(psdf.count(numeric_only=True), pdf.count(numeric_only=True))
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True))
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int)
)
self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True))
self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.var(ddof=0, numeric_only=True),
pdf.var(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.std(ddof=0, numeric_only=True),
pdf.std(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.sem(ddof=0, numeric_only=True),
pdf.sem(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True)))
self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True)))
self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True)))
# Boolean was excluded because of a behavior change in NumPy
# https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits
# but this behavior is inconsistent in pandas context.
# Boolean column in quantile tests are excluded for now.
# TODO(SPARK-35555): track and match the behavior of quantile to pandas'
pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
len(psdf.quantile(q=0.5, numeric_only=True)),
len(pdf.quantile(q=0.5, numeric_only=True)),
)
self.assert_eq(
len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
)
def test_numeric_only_unsupported(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False)
)
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False),
pdf[["i", "b"]].sum(numeric_only=False).astype(int),
)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.sum(numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.s.sum()
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_stats import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
jtmorgan/hostbot | top_1000_report.py | 1 | 10578 | #! /usr/bin/env python
from datetime import datetime, timedelta
import hb_config
import json
import pandas as pd
import requests
from requests_oauthlib import OAuth1
from urllib import parse
rt_header = """== Popular articles {date7} to {date1} ==
Last updated on ~~~~~
{{| class="wikitable sortable"
!Rank
!Article
!Total weekly views
!Days in top 1k this week
"""
footer = """|}
<!--IMPORTANT add all categories to the top section of the page, not here. Otherwise, they will get removed when the bot runs tomorrow! -->
"""
rt_row = """|-
|{rank}
|[[w:{title}|{title}]]
|{week_total}
|{days_in_topk}
"""
def get_yesterdates(lookback=7):
"""
Accepts a lookback parameter of how many days ago to gather data for (not including the current day per UTC time)
Defaults to seven days lookback (val must be at least 1)
Returns a list of dictionaries with the previous n dates (exclusive), in reverse chronological order
"""
date_range = []
for d in range(1, lookback + 1):
date_parts = {'year': datetime.strftime(datetime.now() - timedelta(d), '%Y'),
'month' : datetime.strftime(datetime.now() - timedelta(d), '%m'),
'day': datetime.strftime(datetime.now() - timedelta(d), '%d'),
}
date_parts['display_date'] = date_parts['year'] + "-" + date_parts['month'] + "-" + date_parts['day']
date_parts['api_date'] = date_parts['year'] + date_parts['month'] + date_parts['day'] + "00"
date_range.append(date_parts)
return date_range
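# Shape of the returned list (illustrative values; the real ones depend on the
# current UTC date):
#   [{'year': '2020', 'month': '04', 'day': '01',
#     'display_date': '2020-04-01', 'api_date': '2020040100'}, ...]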
def get_all_topk_articles(day_range):
"""
Accepts a list of dicts with year, month, and day values
Returns a dictionary (article titles as keys)
with all articles that were in the topk list during those dates
and the pageview counts for each of the dates the article appeared in the topk
Example query: https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia.org/all-access/2020/03/31
"""
q_template= "https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia.org/all-access/{year}/{month}/{day}"
all_articles = {}
for day_val in day_range:
q_string = q_template.format(**day_val)
r = requests.get(
url = q_string,
headers = {'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"},
)
# print(r.headers)
# print(r.text)
# print(r.url)
response = r.json()
# print(response)
# response = requests.get(q_string).json()
top_articles_list = response['items'][0]['articles']
for ar in top_articles_list:
if ar['article'] in all_articles.keys():
all_articles[ar['article']].update({day_val['api_date'] : ar['views']})
else:
all_articles.update({ar['article'] : {day_val['api_date'] : ar['views']}})
return all_articles
def ar_days_in_topk(day_range, ar_dict):
"""
    Accepts the number of days in the range (not used directly)
And a nested dict with articles as keys
And as values varying numbers of k,v pairs
Returns the article dictionary with a new k,v pair value
that counts the number of existing k,v pairs in that article dict
"""
for k,v in ar_dict.items():
v['topk_days'] = len(ar_dict[k])
return ar_dict
def get_daily_non_topk_counts(day_range, ar_dict):
"""
    Accepts a list of dicts with year, month, and day values
    And a dict with article titles as keys and per-day view-count dicts as values
    Returns the article dict with daily pageview counts added for the days
    each article did not appear in the top 1000 list
Example query: https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/user/2009_swine_flu_pandemic/daily/2020032500/2020033100
"""
q_template= "https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/en.wikipedia.org/all-access/user/{article}/daily/{day7}/{day1}"
for k,v in ar_dict.items():
if len(v) < 8: #if this article didn't spend all week among the top 1000
safe_title = parse.quote(k, safe='') #in case there are weird chars in title
q_string = q_template.format(article = safe_title, day7 = day_range[6]['api_date'], day1 = day_range[0]['api_date'])
# print(q_string)
r = requests.get(
url = q_string,
headers = {'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"},
)
# print(r.headers)
# print(r.text)
# print(r.url)
response = r.json()
# print(response)
# response = requests.get(q_string).json()
ar_views = response['items']
# print(ar_views)
for d in ar_views:
if d['timestamp'] not in v.keys():
v.update({d['timestamp'] : d['views']})
else:
pass
return ar_dict
def fill_null_date_vals(day_range, ar_dict):
"""
Accepts a list of dicts with year, month, and day values
And a dict with article titles as keys and gaps in the date keys
Returns the article dictionary with each sub-dict fully populated
With pageview values for all dates in range, even if val is 0
"""
#https://www.geeksforgeeks.org/dictionary-methods-in-python-set-2-update-has_key-fromkeys/
    for day_val in day_range:
for v in ar_dict.values():
if len(v) < 8: #if we still don't have any pageviews for some days
v.setdefault(day_val['api_date'], 0) #adds a key with val of 0 if no key present
else:
pass
return ar_dict
def format_row(rank, title, week_total, days_in_topk, row_template):
table_row = {'rank': rank,
'title' : title.replace("_"," "),
'week_total' : week_total,
'days_in_topk' : days_in_topk
}
row = row_template.format(**table_row)
# print(row)
return(row)
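# Example of one rendered wikitable row (hypothetical article title and counts):
#   format_row(1, "Example_article", 123456, 7, rt_row) produces
#   |-
#   |1
#   |[[w:Example article|Example article]]
#   |123456
#   |7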
def get_token(auth1):
"""
Accepts an auth object for a user
Returns an edit token for the specified wiki
"""
result = requests.get(
url="https://en.wikipedia.org/w/api.php", #TODO add to config
params={
'action': "query",
'meta': "tokens",
'type': "csrf",
'format': "json"
},
headers={'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"}, #TODO add to config
auth=auth1,
).json()
# print(result)
edit_token = result['query']['tokens']['csrftoken']
# print(edit_token)
return(edit_token)
def publish_report(output, edit_sum, auth1, edit_token):
"""
Accepts the page text, credentials and edit token
Publishes the formatted page text to the specified wiki
"""
response = requests.post(
url = "https://en.wikipedia.org/w/api.php", #TODO add to config
data={
'action': "edit",
'title': "User:HostBot/Top_1000_report", #TODO add to config
'section': "1",
'summary': edit_sum, #TODO add to config
'text': output,
'bot': 1,
'token': edit_token,
'format': "json"
},
headers={'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"}, #TODO add to config
auth=auth1
)
if __name__ == "__main__":
auth1 = OAuth1("b5d87cbe96174f9435689a666110159c",
hb_config.client_secret,
"ca1b222d687be9ac33cfb49676f5bfd2",
hb_config.resource_owner_secret)
#get previous week's date info for query and reporting
week_of_days = get_yesterdates(lookback=7)
#get all of the articles that appeared on the topk list that week
all_articles = get_all_topk_articles(week_of_days)
#get counts for all days each article was in the top 1000
# all_articles = get_daily_topk_counts(week_of_days, all_articles)
#add number of days each article appears in the topk list. could do this in first function too
all_articles = ar_days_in_topk(len(week_of_days), all_articles)
#add page counts for days the article was not in the topk list
all_articles = get_daily_non_topk_counts(week_of_days, all_articles)
all_articles = fill_null_date_vals(week_of_days, all_articles)
#now we're ready to make a dataframe!
df_aa = pd.DataFrame.from_dict(all_articles, orient="index")
#sum across the daily counts
#https://stackoverflow.com/questions/25748683/pandas-sum-dataframe-rows-for-given-columns
df_aa['week_total'] = df_aa.sum(axis=1)
#make the title NOT the index. Should do this when creating the frame, instead
df_aa.reset_index(inplace=True)
#rename title column. Should do this when creating the frame, instead
df_aa.rename(columns = {'index' : 'title'}, inplace=True)
#remove blacklisted titles--pages we don't care about, for these purposes. Although... we could keep them I guess.
blacklist = ["Main_Page", "Special:", "Category:", "Portal:", "Template:", "Wikipedia:", "Talk:", "User:", "_talk:", "Help:", "File:", "United_States_Senate",]
df_aa = df_aa[~df_aa['title'].str.contains('|'.join(blacklist))]
#sort by weekly views
df_aa.sort_values('week_total', ascending=False, inplace=True)
#add rank column based on weekly views
new_rank = range(1, len(df_aa)+1)
df_aa['rank'] = list(new_rank)
#reset the index to reflect the final ranking, dropping the existing index this time
df_aa.reset_index(drop=True, inplace=True)
#start and end dates for header and edit comment
header_dates = {'date1' : week_of_days[0]['display_date'],
'date7' : week_of_days[6]['display_date']
}
#format the header template
header = rt_header.format(**header_dates)
report_rows = [format_row(a, b, c, d, rt_row) #this is messy
for a, b, c, d
in zip(df_aa['rank'],
df_aa['title'],
df_aa['week_total'],
df_aa['topk_days'],
)]
rows_wiki = ''.join(report_rows)
output = header + rows_wiki + footer
# print(output)
edit_token = get_token(auth1)
edit_sum = "Popular articles from {date7} to {date1}".format(**header_dates)
publish_report(output, edit_sum, auth1, edit_token)
| mit |
jpautom/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
    frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information-theoretic evaluation scores: as they are based only
on cluster assignments rather than distances, they are not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
akosyakov/intellij-community | python/helpers/pydev/pydev_ipython/matplotlibtools.py | 52 | 5401 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
setattr(matplotlib, "real_use", getattr(matplotlib, "use"))
setattr(matplotlib, "use", patched_use)
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive"))
setattr(matplotlib, "is_interactive", patched_is_interactive)
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
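# Small usage sketch (illustrative only, not used by pydev itself): after wrapping,
# the 'called' flag flips to True once the wrapped function has completed.
def _demo_flag_calls():
    def draw():
        return "drawn"
    draw = flag_calls(draw)
    before = draw.called   # False
    result = draw()        # "drawn"
    after = draw.called    # True
    return before, result, after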
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
| apache-2.0 |
thaumos/ansible | hacking/aws_config/build_iam_policy_framework.py | 25 | 11861 | # Requires pandas, bs4, html5lib, and lxml
#
# Call script with the output from aws_resource_actions callback, e.g.
# python build_iam_policy_framework.py ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress', 'sts:GetCallerIdentity']
#
# The sample output:
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Sid": "AnsibleEditor0",
# "Effect": "Allow",
# "Action": [
# "ec2:AuthorizeSecurityGroupEgress",
# "ec2:AuthorizeSecurityGroupIngress"
# ],
# "Resource": "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}"
# },
# {
# "Sid": "AnsibleEditor1",
# "Effect": "Allow",
# "Action": [
# "sts:GetCallerIdentity"
# ],
# "Resource": "*"
# }
# ]
# }
#
# Policy troubleshooting:
# - If there are more actions in the policy than you provided, AWS has documented dependencies for some of your actions and
# those have been added to the policy.
# - If there are fewer actions in the policy than you provided, some of your actions are not in the IAM table of actions for
# that service. For example, the API call s3:DeleteObjects does not actually correlate to the permission needed in a policy.
# In this case s3:DeleteObject is the permission required to allow both the s3:DeleteObjects action and the s3:DeleteObject action.
# - The policies output are only as accurate as the AWS documentation. If the policy does not permit the
# necessary actions, look for undocumented dependencies. For example, redshift:CreateCluster requires ec2:DescribeVpcs,
# ec2:DescribeSubnets, ec2:DescribeSecurityGroups, and ec2:DescribeInternetGateways, but AWS does not document this.
#
import json
import requests
import sys
missing_dependencies = []
try:
import pandas as pd
except ImportError:
missing_dependencies.append('pandas')
try:
import bs4
except ImportError:
missing_dependencies.append('bs4')
try:
import html5lib
except ImportError:
missing_dependencies.append('html5lib')
try:
import lxml
except ImportError:
missing_dependencies.append('lxml')
irregular_service_names = {
'a4b': 'alexaforbusiness',
'appstream': 'appstream2.0',
'acm': 'certificatemanager',
'acm-pca': 'certificatemanagerprivatecertificateauthority',
'aws-marketplace-management': 'marketplacemanagementportal',
'ce': 'costexplorerservice',
'cognito-identity': 'cognitoidentity',
'cognito-sync': 'cognitosync',
'cognito-idp': 'cognitouserpools',
'cur': 'costandusagereport',
'dax': 'dynamodbacceleratordax',
'dlm': 'datalifecyclemanager',
'dms': 'databasemigrationservice',
'ds': 'directoryservice',
'ec2messages': 'messagedeliveryservice',
'ecr': 'ec2containerregistry',
'ecs': 'elasticcontainerservice',
'eks': 'elasticcontainerserviceforkubernetes',
'efs': 'elasticfilesystem',
'es': 'elasticsearchservice',
'events': 'cloudwatchevents',
'firehose': 'kinesisfirehose',
'fms': 'firewallmanager',
'health': 'healthapisandnotifications',
'importexport': 'importexportdiskservice',
'iot1click': 'iot1-click',
'kafka': 'managedstreamingforkafka',
'kinesisvideo': 'kinesisvideostreams',
'kms': 'keymanagementservice',
'license-manager': 'licensemanager',
'logs': 'cloudwatchlogs',
'opsworks-cm': 'opsworksconfigurationmanagement',
'mediaconnect': 'elementalmediaconnect',
'mediaconvert': 'elementalmediaconvert',
'medialive': 'elementalmedialive',
'mediapackage': 'elementalmediapackage',
'mediastore': 'elementalmediastore',
'mgh': 'migrationhub',
'mobiletargeting': 'pinpoint',
'pi': 'performanceinsights',
'pricing': 'pricelist',
'ram': 'resourceaccessmanager',
'resource-groups': 'resourcegroups',
'sdb': 'simpledb',
'servicediscovery': 'cloudmap',
'serverlessrepo': 'serverlessapplicationrepository',
'sms': 'servermigrationservice',
'sms-voice': 'pinpointsmsandvoiceservice',
'sso-directory': 'ssodirectory',
'ssm': 'systemsmanager',
'ssmmessages': 'sessionmanagermessagegatewayservice',
'states': 'stepfunctions',
'sts': 'securitytokenservice',
'swf': 'simpleworkflowservice',
'tag': 'resourcegrouptaggingapi',
'transfer': 'transferforsftp',
'waf-regional': 'wafregional',
'wam': 'workspacesapplicationmanager',
'xray': 'x-ray'
}
irregular_service_links = {
'apigateway': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_manageamazonapigateway.html'
],
'aws-marketplace': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplace.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplacemeteringservice.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsprivatemarketplace.html'
],
'discovery': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_applicationdiscovery.html'
],
'elasticloadbalancing': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancing.html',
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancingv2.html'
],
'globalaccelerator': [
'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_globalaccelerator.html'
]
}
def get_docs_by_prefix(prefix):
amazon_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazon{0}.html'
aws_link_form = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_aws{0}.html'
if prefix in irregular_service_links:
links = irregular_service_links[prefix]
else:
if prefix in irregular_service_names:
prefix = irregular_service_names[prefix]
links = [amazon_link_form.format(prefix), aws_link_form.format(prefix)]
return links
def get_html(links):
html_list = []
for link in links:
html = requests.get(link).content
try:
parsed_html = pd.read_html(html)
html_list.append(parsed_html)
except ValueError as e:
if 'No tables found' in str(e):
pass
else:
raise e
return html_list
def get_tables(service):
links = get_docs_by_prefix(service)
html_list = get_html(links)
action_tables = []
arn_tables = []
for df_list in html_list:
for df in df_list:
table = json.loads(df.to_json(orient='split'))
table_data = table['data'][0]
if 'Actions' in table_data and 'Resource Types (*required)' in table_data:
action_tables.append(table['data'][1::])
elif 'Resource Types' in table_data and 'ARN' in table_data:
arn_tables.append(table['data'][1::])
# Action table indices:
# 0: Action, 1: Description, 2: Access level, 3: Resource type, 4: Condition keys, 5: Dependent actions
# ARN tables indices:
# 0: Resource type, 1: ARN template, 2: Condition keys
return action_tables, arn_tables
def add_dependent_action(resources, dependency):
resource, action = dependency.split(':')
if resource in resources:
resources[resource].append(action)
else:
resources[resource] = [action]
return resources
def get_dependent_actions(resources):
for service in dict(resources):
action_tables, arn_tables = get_tables(service)
for found_action_table in action_tables:
for action_stuff in found_action_table:
if action_stuff is None:
continue
if action_stuff[0] in resources[service] and action_stuff[5]:
dependencies = action_stuff[5].split()
if isinstance(dependencies, list):
for dependency in dependencies:
resources = add_dependent_action(resources, dependency)
else:
resources = add_dependent_action(resources, dependencies)
return resources
def get_actions_by_service(resources):
service_action_dict = {}
dependencies = {}
for service in resources:
action_tables, arn_tables = get_tables(service)
# Create dict of the resource type to the corresponding ARN
arn_dict = {}
for found_arn_table in arn_tables:
for arn_stuff in found_arn_table:
arn_dict["{0}*".format(arn_stuff[0])] = arn_stuff[1]
# Create dict of the action to the corresponding ARN
action_dict = {}
for found_action_table in action_tables:
for action_stuff in found_action_table:
if action_stuff[0] is None:
continue
if arn_dict.get(action_stuff[3]):
action_dict[action_stuff[0]] = arn_dict[action_stuff[3]]
else:
action_dict[action_stuff[0]] = None
service_action_dict[service] = action_dict
return service_action_dict
def get_resource_arns(aws_actions, action_dict):
resource_arns = {}
for resource_action in aws_actions:
resource, action = resource_action.split(':')
if action not in action_dict:
continue
if action_dict[action] is None:
resource = "*"
else:
resource = action_dict[action].replace("${Partition}", "aws")
if resource not in resource_arns:
resource_arns[resource] = []
resource_arns[resource].append(resource_action)
return resource_arns
def get_resources(actions):
resources = {}
for action in actions:
resource, action = action.split(':')
if resource not in resources:
resources[resource] = []
resources[resource].append(action)
return resources
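# Illustrative example of the grouping performed above:
#   get_resources(['ec2:DescribeVpcs', 'ec2:DescribeSubnets', 'sts:GetCallerIdentity'])
#   -> {'ec2': ['DescribeVpcs', 'DescribeSubnets'], 'sts': ['GetCallerIdentity']}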
def combine_arn_actions(resources, service_action_arn_dict):
arn_actions = {}
for service in service_action_arn_dict:
        service_arn_actions = get_resource_arns(resources, service_action_arn_dict[service])
for resource in service_arn_actions:
if resource in arn_actions:
arn_actions[resource].extend(service_arn_actions[resource])
else:
arn_actions[resource] = service_arn_actions[resource]
return arn_actions
def combine_actions_and_dependent_actions(resources):
aws_actions = []
for resource in resources:
for action in resources[resource]:
aws_actions.append('{0}:{1}'.format(resource, action))
return set(aws_actions)
def get_actions_restricted_by_arn(aws_actions):
resources = get_resources(aws_actions)
resources = get_dependent_actions(resources)
service_action_arn_dict = get_actions_by_service(resources)
aws_actions = combine_actions_and_dependent_actions(resources)
return combine_arn_actions(aws_actions, service_action_arn_dict)
def main(aws_actions):
arn_actions = get_actions_restricted_by_arn(aws_actions)
statement = []
for resource_restriction in arn_actions:
statement.append({
"Sid": "AnsibleEditor{0}".format(len(statement)),
"Effect": "Allow",
"Action": arn_actions[resource_restriction],
"Resource": resource_restriction
})
policy = {"Version": "2012-10-17", "Statement": statement}
print(json.dumps(policy, indent=4))
if __name__ == '__main__':
if missing_dependencies:
sys.exit('Missing Python libraries: {0}'.format(', '.join(missing_dependencies)))
actions = sys.argv[1:]
if len(actions) == 1:
actions = sys.argv[1].split(',')
aws_actions = [action.strip('[], "\'') for action in actions]
main(aws_actions)
| gpl-3.0 |
evertonaleixo/tarp | DeepLearning/deep-belief-network-example.py | 1 | 1185 | # coding=utf-8
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.metrics.classification import accuracy_score
import numpy as np
from dbn import SupervisedDBNClassification
# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target
# Data scaling
X = (X / 16).astype(np.float32)
# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
learning_rate_rbm=0.1,
learning_rate=0.1,
n_epochs_rbm=10,
n_iter_backprop=100,
l2_regularization=0.0,
batch_size=32,
activation_function='relu',
dropout_p=0.2)
classifier.fit(X_train, Y_train)
# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: ')
print(accuracy_score(Y_test, Y_pred)) | apache-2.0 |
waterponey/scikit-learn | sklearn/datasets/tests/test_base.py | 13 | 8907 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
X_y_tuple = load_digits(return_X_y=True)
bunch = load_digits()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
# test return_X_y option
X_y_tuple = load_diabetes(return_X_y=True)
bunch = load_diabetes()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_linnerud(return_X_y=True)
bunch = load_linnerud()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_iris(return_X_y=True)
bunch = load_iris()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_breast_cancer(return_X_y=True)
bunch = load_breast_cancer()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
# test return_X_y option
X_y_tuple = load_boston(return_X_y=True)
bunch = load_boston()
assert_true(isinstance(X_y_tuple, tuple))
assert_array_equal(X_y_tuple[0], bunch.data)
assert_array_equal(X_y_tuple[1], bunch.target)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a suprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
| bsd-3-clause |
hansonrobotics/chatbot | src/chatbot/stats.py | 1 | 3618 | import os
import logging
import pandas as pd
import glob
import re
import datetime as dt
from collections import Counter
logger = logging.getLogger('hr.chatbot.stats')
trace_pattern = re.compile(
r'../(?P<fname>.*), (?P<tloc>\(.*\)), (?P<pname>.*), (?P<ploc>\(.*\))')
def collect_history_data(history_dir, days):
today = dt.datetime.utcnow()
dfs = []
for d in glob.glob('{}/*'.format(history_dir)):
if os.path.isdir(d):
dirname = os.path.basename(d)
dirdate = None
try:
dirdate = dt.datetime.strptime(dirname, '%Y%m%d')
except Exception as ex:
logger.error(ex)
if dirdate and (days == -1 or (today - dirdate).days < days):
for fname in glob.glob('{}/{}/*.csv'.format(history_dir, dirname)):
try:
dfs.append(pd.read_csv(fname))
except Exception as ex:
logger.warn("Reading {} error: {}".format(fname, ex))
if not dfs:
return None
df = pd.concat(dfs, ignore_index=True)
df = df[df.Datetime != 'Datetime'].sort(
['User', 'Datetime']).drop_duplicates()
return df
def history_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
if days == -1:
stats_csv = '{}/full_history.csv'.format(history_dir)
else:
stats_csv = '{}/last_{}_days.csv'.format(history_dir, days)
columns = [u'Datetime', u'Revision', u'User', u'BotName',
u'AnsweredBy', u'Question', u'Answer', u'Rate', u'Trace']
df.to_csv(stats_csv, index=False, columns=columns)
logger.info("Write statistic records to {}".format(stats_csv))
records = len(df)
rates = len(df[df.Rate.notnull()])
good_rates = len(df[df.Rate.isin(['good'])])
bad_rates = len(df[df.Rate.isin(['bad'])])
if records > 0:
csd = float(records - bad_rates) / records
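        # e.g. a hypothetical run with 200 records and 10 rated 'bad' gives csd = 190/200 = 0.95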
response = {
'customers_satisfaction_degree': csd,
'number_of_records': records,
'number_of_rates': rates,
'number_of_good_rates': good_rates,
'number_of_bad_rates': bad_rates,
}
return response
def playback_history(df):
from client import Client
client = Client(os.environ.get('HR_CHATBOT_AUTHKEY', 'AAAAB3NzaC'), test=True)
pattern_column = []
for question in df.Question:
answer = client.ask(question, True)
traces = answer.get('trace')
patterns = []
if traces:
for trace in traces:
match_obj = trace_pattern.match(trace)
if match_obj:
patterns.append(match_obj.group('pname'))
pattern_column.append(patterns)
df.loc[:,'Pattern'] = pd.Series(pattern_column, index=df.index)
return df
def pattern_stats(history_dir, days):
df = collect_history_data(history_dir, days)
if df is None:
return {}
df = playback_history(df)
patterns = sum(df.Pattern, [])
counter = Counter(patterns)
pattern_freq = pd.Series(counter)
pattern_freq.sort(ascending=False)
stats_csv = '{}/pattern_frequency.csv'.format(history_dir)
pattern_freq.to_csv(stats_csv)
logger.info("Write pattern statistic to {}".format(stats_csv))
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
history_stats(os.path.expanduser('~/.hr/chatbot/history'), 7)
pattern_stats(os.path.expanduser('~/.hr/chatbot/history'), -1)
| mit |
uberdugo/mlia | Ch05/EXTRAS/plot2D.py | 7 | 1276 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import logRegres
dataMat,labelMat=logRegres.loadDataSet()
dataArr = array(dataMat)
weights = logRegres.stocGradAscent0(dataArr,labelMat)
n = shape(dataArr)[0] #number of points to create
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
markers =[]
colors =[]
for i in range(n):
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(xcord,ycord, c=colors, s=markers)
type1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
type2 = ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
#weights = [-2.9, 0.72, 1.29]
#weights = [-5, 1.09, 1.42]
weights = [13.03822793, 1.32877317, -1.96702074]
weights = [4.12, 0.48, -0.6168]
y = (-weights[0]-weights[1]*x)/weights[2]
type3 = ax.plot(x, y)
#ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2)
#ax.axis([-5000,100000,-2,25])
plt.xlabel('X1')
plt.ylabel('X2')
plt.show() | gpl-3.0 |
potash/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/api/scatter_piecharts.py | 6 | 1194 | """
This example makes custom 'pie charts' as the markers for a scatter plot.
Thanks to Manuel Metz for the example
"""
import math
import numpy as np
import matplotlib.pyplot as plt
# first define the ratios
r1 = 0.2 # 20%
r2 = r1 + 0.4 # 40%
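# so the three wedges cover 20%, 40% and the remaining 40% of the circle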
# define some sizes of the scatter marker
sizes = [60,80,120]
# calculate the points of the first pie marker
#
# these are just the origin (0,0) +
# some points on a circle cos,sin
x = [0] + np.cos(np.linspace(0, 2*math.pi*r1, 10)).tolist()
y = [0] + np.sin(np.linspace(0, 2*math.pi*r1, 10)).tolist()
xy1 = list(zip(x,y))
# ...
x = [0] + np.cos(np.linspace(2*math.pi*r1, 2*math.pi*r2, 10)).tolist()
y = [0] + np.sin(np.linspace(2*math.pi*r1, 2*math.pi*r2, 10)).tolist()
xy2 = list(zip(x,y))
x = [0] + np.cos(np.linspace(2*math.pi*r2, 2*math.pi, 10)).tolist()
y = [0] + np.sin(np.linspace(2*math.pi*r2, 2*math.pi, 10)).tolist()
xy3 = list(zip(x,y))
fig, ax = plt.subplots()
ax.scatter( np.arange(3), np.arange(3), marker=(xy1,0), s=sizes, facecolor='blue' )
ax.scatter( np.arange(3), np.arange(3), marker=(xy2,0), s=sizes, facecolor='green' )
ax.scatter( np.arange(3), np.arange(3), marker=(xy3,0), s=sizes, facecolor='red' )
plt.show()
| mit |
mmottahedi/neuralnilm_prototype | scripts/e249.py | 2 | 3897 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
from math import sqrt
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.0,
n_seq_per_batch=50,
# subsample_target=5,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=True
#lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
layers_config=[
{
'type': LSTMLayer,
'num_units': 10,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Normal(std=1.)
}
]
)
def exp_x(name, learning_rate):
global source
try:
a = source
except NameError:
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source,
updates=partial(nesterov_momentum, learning_rate=learning_rate)
))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid,
'W': Normal(std=(1/sqrt(50)))
}
)
net = Net(**net_dict_copy)
return net
def main():
for experiment, learning_rate in [('a', 1.0), ('b', 0.1), ('c', 0.01),
('d', 0.001), ('e', 0.0001)]:
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
print("***********************************")
print("Preparing", full_exp_name, "...")
try:
net = exp_x(full_exp_name, learning_rate)
run_experiment(net, path, epochs=1000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
| mit |
brodeau/aerobulk | python/plot_tests/plot_station_asf.py | 1 | 9926 | #!/usr/bin/env python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Post-diagnostic of STATION_ASF / L. Brodeau, 2019
import sys
from os import path as path
#from string import replace
import math
import numpy as nmp
from netCDF4 import Dataset,num2date
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
reload(sys)
sys.setdefaultencoding('utf8')
cy1 = '2016' ; # First year
cy2 = '2018' ; # Last year
jt0 = 0
jt0 = 17519
dir_figs='.'
size_fig=(13,7)
fig_ext='png'
clr_red = '#AD0000'
clr_blu = '#3749A3'
clr_gre = '#548F64'
clr_sat = '#ffed00'
clr_mod = '#008ab8'
rDPI=200.
L_ALGOS = [ 'COARE3p6' , 'ECMWF' , 'NCAR' ]
l_xtrns = [ '-noskin' , '-noskin' , '' ] ; # string to add to algo name (L_ALGOS) to get version without skin params turned on
l_color = [ '#ffed00' , '#008ab8' , '0.4' ] ; # colors to differentiate algos on the plot
l_width = [ 3 , 2 , 1 ] ; # line-width to differentiate algos on the plot
l_style = [ '-' , '-' , '--' ] ; # line-style
L_VNEM = [ 'qla' , 'qsb' , 'qt' , 'qlw' , 'taum' , 'dt_skin' ]
L_VARO = [ 'Qlat' , 'Qsen' , 'Qnet' , 'Qlw' , 'Tau' , 'dT_skin' ] ; # name of variable on figure
L_VARL = [ r'$Q_{lat}$', r'$Q_{sens}$' , r'$Q_{net}$' , r'$Q_{lw}$' , r'$|\tau|$' , r'$\Delta T_{skin}$' ] ; # name of variable in latex mode
L_VUNT = [ r'$W/m^2$' , r'$W/m^2$' , r'$W/m^2$' , r'$W/m^2$' , r'$N/m^2$' , 'K' ]
L_VMAX = [ 75. , 75. , 800. , 25. , 1.2 , -0.7 ]
L_VMIN = [ -250. , -125. , -400. , -150. , 0. , 0.7 ]
L_ANOM = [ True , True , True , True , True , False ]
#L_VNEM = [ 'qlw' ]
#L_VARO = [ 'Qlw' ] ; # name of variable on figure
#L_VARL = [ r'$Q_{lw}$' ] ; # name of variable in latex mode
#L_VUNT = [ r'$W/m^2$' ]
#L_VMAX = [ 25. ]
#L_VMIN = [ -150. ]
#L_ANOM = [ True ]
nb_algos = len(L_ALGOS) ; print(nb_algos)
# Getting arguments:
narg = len(sys.argv)
if narg != 2:
print 'Usage: '+sys.argv[0]+' <DIR_OUT_SASF>'; sys.exit(0)
cdir_data = sys.argv[1]
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Populating and checking existence of files to be read
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def chck4f(cf):
cmesg = 'ERROR: File '+cf+' does not exist !!!'
if not path.exists(cf): print cmesg ; sys.exit(0)
###cf_in = nmp.empty((), dtype="S10")
cf_in = [] ; cf_in_ns = []
for ja in range(nb_algos):
cfi = cdir_data+'/output/'+'STATION_ASF-'+L_ALGOS[ja]+'_1h_'+cy1+'0101_'+cy2+'1231_gridT.nc'
chck4f(cfi)
cf_in.append(cfi)
# Same but without skin params:
for ja in range(nb_algos):
cfi = cdir_data+'/output/'+'STATION_ASF-'+L_ALGOS[ja]+l_xtrns[ja]+'_1h_'+cy1+'0101_'+cy2+'1231_gridT.nc'
chck4f(cfi)
cf_in_ns.append(cfi)
print('Files we are goin to use:')
for ja in range(nb_algos): print(cf_in[ja])
print(' --- same without cool-skin/warm-layer:')
for ja in range(nb_algos): print(cf_in_ns[ja])
#-----------------------------------------------------------------
# Getting time array from the first file:
id_in = Dataset(cf_in[0])
vt = id_in.variables['time_counter'][jt0:]
cunit_t = id_in.variables['time_counter'].units
clndr_t = id_in.variables['time_counter'].calendar
id_in.close()
Nt = len(vt)
print(' "time" => units = '+cunit_t+', calendar = "'+clndr_t+'"')
vtime = num2date(vt, units=cunit_t) ; # something understandable!
ii=Nt/300
ib=max(ii-ii%10,1)
xticks_d=int(30*ib)
font_inf = { 'fontname':'Open Sans', 'fontweight':'normal', 'fontsize':14 }
nb_var = len(L_VNEM)
xF = nmp.zeros((Nt,nb_algos))
xFa = nmp.zeros((Nt,nb_algos))
for ctest in ['skin','noskin']:
for jv in range(nb_var):
print('\n *** Treating variable: '+L_VARO[jv]+' ('+ctest+') !')
for ja in range(nb_algos):
#
if ctest == 'skin': id_in = Dataset(cf_in[ja])
if ctest == 'noskin': id_in = Dataset(cf_in_ns[ja])
xF[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
fig = plt.figure(num = jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
for ja in range(nb_algos):
plt.plot(vtime, xF[:,ja], '-', color=l_color[ja], linestyle=l_style[ja], linewidth=l_width[ja], label=L_ALGOS[ja], zorder=10+ja)
ax1.set_ylim(L_VMIN[jv], L_VMAX[jv]) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate(cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig(L_VARO[jv]+'_'+ctest+'.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(jv)
if L_ANOM[jv]:
for ja in range(nb_algos): xFa[:,ja] = xF[:,ja] - nmp.mean(xF,axis=1)
if nmp.sum(xFa[:,:]) == 0.0:
            print(' Well! Seems that for variable '+L_VARO[jv]+', choice of algo has no impact at all!')
print(' ==> skipping anomaly plot...')
else:
            # Want a symmetric y-range that makes sense for the anomaly we're looking at:
rmax = nmp.max(xFa) ; rmin = nmp.min(xFa)
rmax = max( abs(rmax) , abs(rmin) )
romagn = math.floor(math.log(rmax, 10)) ; # order of magnitude of the anomaly we're dealing with
rmlt = 10.**(int(romagn)) / 2.
yrng = math.copysign( math.ceil(abs(rmax)/rmlt)*rmlt , rmax)
#print 'yrng = ', yrng ; #sys.exit(0)
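            # worked example (hypothetical value): rmax = 37 gives romagn = 1,
            # rmlt = 5 and yrng = 40, so the anomaly axis spans -40 to +40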
fig = plt.figure(num = 10+jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
for ja in range(nb_algos):
plt.plot(vtime, xFa[:,ja], '-', color=l_color[ja], linewidth=l_width[ja], label=L_ALGOS[ja], zorder=10+ja)
ax1.set_ylim(-yrng,yrng) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate('Anomaly of '+cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig(L_VARO[jv]+'_'+ctest+'_anomaly.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(10+jv)
# Difference skin vs noskin:
xFns = nmp.zeros((Nt,nb_algos))
for jv in range(nb_var-1):
print('\n *** Treating variable: '+L_VARO[jv]+' ('+ctest+') !')
for ja in range(nb_algos-1):
id_in = Dataset(cf_in[ja])
xF[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
#
id_in = Dataset(cf_in_ns[ja])
xFns[:,ja] = id_in.variables[L_VNEM[jv]][jt0:,1,1] # only the center point of the 3x3 spatial domain!
if ja == 0: cvar_lnm = id_in.variables[L_VNEM[jv]].long_name
id_in.close()
xFa[:,ja] = xF[:,ja] - xFns[:,ja] ; # difference!
    # Want a symmetric y-range that makes sense for the anomaly we're looking at:
rmax = nmp.max(xFa) ; rmin = nmp.min(xFa)
rmax = max( abs(rmax) , abs(rmin) )
romagn = math.floor(math.log(rmax, 10)) ; # order of magnitude of the anomaly we're dealing with
rmlt = 10.**(int(romagn)) / 2.
yrng = math.copysign( math.ceil(abs(rmax)/rmlt)*rmlt , rmax)
print 'yrng = ', yrng ; #sys.exit(0)
for ja in range(nb_algos-1):
calgo = L_ALGOS[ja]
if nmp.sum(xFa[:,ja]) == 0.0:
print(' Well! Seems that for variable '+L_VARO[jv]+', and algo '+calgo+', skin param has no impact')
print(' ==> skipping difference plot...')
else:
fig = plt.figure(num = jv, figsize=size_fig, facecolor='w', edgecolor='k')
ax1 = plt.axes([0.07, 0.22, 0.9, 0.75])
ax1.set_xticks(vtime[::xticks_d])
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M:%S'))
plt.xticks(rotation='60')
plt.plot(vtime, xFa[:,ja], '-', color=l_color[ja], linestyle=l_style[ja], linewidth=l_width[ja], label=None, zorder=10+ja)
ax1.set_ylim(-yrng,yrng) ; ax1.set_xlim(vtime[0],vtime[Nt-1])
plt.ylabel(L_VARL[jv]+' ['+L_VUNT[jv]+']')
ax1.grid(color='k', linestyle='-', linewidth=0.3)
#plt.legend(bbox_to_anchor=(0.45, 0.2), ncol=1, shadow=True, fancybox=True)
ax1.annotate(cvar_lnm+' ('+ctest+')', xy=(0.3, 0.97), xycoords='axes fraction', bbox={'facecolor':'w', 'alpha':1., 'pad':10}, zorder=50, **font_inf)
plt.savefig('diff_skin-noskin_'+L_VARO[jv]+'_'+calgo+'_'+ctest+'.'+fig_ext, dpi=int(rDPI), transparent=False)
plt.close(jv)
| gpl-3.0 |
CharlesShang/TFFRCNN | lib/roi_data_layer/minibatch.py | 5 | 8725 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
import os
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
from ..utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
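    # For example, with a hypothetical cfg.TRAIN.BATCH_SIZE of 128, two images
    # per minibatch and FG_FRACTION of 0.25, each image contributes 64 RoIs,
    # of which at most 16 are sampled as foreground.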
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] \
if 'gt_ishard' in roidb[0] else np.zeros(gt_inds.size, dtype=int)
# blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]
blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] \
if 'dontcare_areas' in roidb[0] else np.zeros([0, 4], dtype=float)
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
blobs['im_name'] = os.path.basename(roidb[0]['image'])
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
        bbox_targets (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
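# For instance (values are hypothetical): with num_classes=3, a single row
# [1, 0.1, 0.2, 0.3, 0.4] (class 1 followed by its four targets) expands to a
# 1x12 row whose columns 4:8 hold [0.1, 0.2, 0.3, 0.4]; bbox_inside_weights
# gets cfg.TRAIN.BBOX_INSIDE_WEIGHTS in those same columns and zeros elsewhere.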
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
| mit |
robcarver17/pysystemtrade | sysproduction/reporting/trades_report.py | 1 | 16443 | from copy import copy
from collections import namedtuple
import datetime
import numpy as np
import pandas as pd
from syscore.genutils import transfer_object_attributes
from syscore.pdutils import make_df_from_list_of_named_tuple
from syscore.objects import header, table, body_text, arg_not_supplied, missing_data
from sysdata.data_blob import dataBlob
from sysproduction.data.orders import dataOrders
from sysproduction.data.instruments import diagInstruments
from sysproduction.reporting.risk_report import get_current_annualised_stdev_for_instrument
def trades_info(
data=arg_not_supplied,
calendar_days_back=1,
end_date=arg_not_supplied,
start_date=arg_not_supplied,
):
"""
Report on system status
:param: data blob
:return: list of formatted output items
"""
if data is arg_not_supplied:
data = dataBlob()
if end_date is arg_not_supplied:
end_date = datetime.datetime.now()
if start_date is arg_not_supplied:
start_date = end_date - datetime.timedelta(days=calendar_days_back)
results_object = get_trades_report_data(
data, start_date=start_date, end_date=end_date
)
formatted_output = format_trades_data(results_object)
return formatted_output
def get_trades_report_data(data, start_date, end_date):
broker_orders = get_recent_broker_orders(data, start_date, end_date)
if len(broker_orders) == 0:
empty_df = pd.DataFrame()
results_object = dict(overview=empty_df)
return results_object
overview = broker_orders[
[
"instrument_code",
"strategy_name",
"contract_date",
"fill_datetime",
"fill",
"filled_price",
]
]
delays = create_delay_df(broker_orders)
raw_slippage = create_raw_slippage_df(broker_orders)
vol_slippage = create_vol_norm_slippage_df(raw_slippage, data)
cash_slippage = create_cash_slippage_df(raw_slippage, data)
summary_dict = {}
item_list = [
"delay",
"bid_ask",
"execution",
"versus_limit",
"versus_parent_limit",
"total_trading",
]
detailed_raw_results = get_stats_for_slippage_groups(
raw_slippage, item_list)
summary_dict.update(detailed_raw_results)
item_list = [
"delay_vol",
"bid_ask_vol",
"execution_vol",
"versus_limit_vol",
"versus_parent_limit_vol",
"total_trading_vol",
]
detailed_vol_results = get_stats_for_slippage_groups(
vol_slippage, item_list)
summary_dict.update(detailed_vol_results)
item_list = [
"delay_cash",
"bid_ask_cash",
"execution_cash",
"versus_limit_cash",
"versus_parent_limit_cash",
"total_trading_cash",
]
detailed_cash_results = get_stats_for_slippage_groups(
cash_slippage, item_list)
summary_dict.update(detailed_cash_results)
results_object = dict(
overview=overview,
delays=delays,
raw_slippage=raw_slippage,
vol_slippage=vol_slippage,
cash_slippage=cash_slippage,
summary_dict=summary_dict,
)
return results_object
def format_trades_data(results_object):
"""
Put the results into a printable format
:param results_dict: dict, keys are different segments
:return:
"""
formatted_output = []
formatted_output.append(
header("Trades report produced on %s" % (str(datetime.datetime.now())))
)
if len(results_object["overview"]) == 0:
formatted_output.append(body_text("No trades in relevant period"))
return formatted_output
table1_df = results_object["overview"]
table1 = table("Broker orders", table1_df)
formatted_output.append(table1)
table2_df = results_object["delays"]
table2 = table("Delays", table2_df)
formatted_output.append(table2)
table3_df = results_object["raw_slippage"]
table3 = table("Slippage (ticks per lot)", table3_df)
formatted_output.append(table3)
table4_df = results_object["vol_slippage"]
table4 = table(
"Slippage (normalised by annual vol, BP of annual SR)",
table4_df)
formatted_output.append(table4)
table5_df = results_object["cash_slippage"]
table5 = table("Slippage (In base currency)", table5_df)
formatted_output.append(table5)
summary_results_dict = results_object["summary_dict"]
for summary_table_name, summary_table_item in summary_results_dict.items():
summary_table = table(
"Summary %s" %
summary_table_name,
summary_table_item)
formatted_output.append(summary_table)
return formatted_output
tradesData = namedtuple(
"tradesData",
[
"order_id",
"instrument_code",
"strategy_name",
"contract_date",
"fill",
"filled_price",
"mid_price",
"side_price",
"offside_price",
"parent_reference_price", # from contract order
"parent_reference_datetime", # from instrument order
"submit_datetime",
"fill_datetime",
"limit_price",
"trade",
"buy_or_sell",
"parent_limit_price",
"commission",
],
)
data = dataBlob()
def get_recent_broker_orders(data, start_date, end_date):
data_orders = dataOrders(data)
order_id_list = data_orders.get_historic_broker_order_ids_in_date_range(
start_date, end_date
)
orders_as_list = [get_tuple_object_from_order_id(
data, order_id) for order_id in order_id_list]
pdf = make_df_from_list_of_named_tuple(tradesData, orders_as_list)
return pdf
def get_tuple_object_from_order_id(data, order_id):
data_orders = dataOrders(data)
order = data_orders.get_historic_broker_order_from_order_id_with_execution_data(
order_id)
tuple_object = transfer_object_attributes(tradesData, order)
return tuple_object
def create_delay_df(broker_orders):
delay_data_as_list = [
delay_row(
broker_orders.iloc[irow]) for irow in range(
len(broker_orders))]
delay_data_df = pd.concat(delay_data_as_list, axis=1)
delay_data_df = delay_data_df.transpose()
delay_data_df.index = broker_orders.index
return delay_data_df
def delay_row(order_row):
submit_minus_generated, filled_minus_submit = delay_calculations_for_order_row(
order_row)
new_order_row = copy(order_row)
new_order_row = new_order_row[
[
"instrument_code",
"strategy_name",
"parent_reference_datetime",
"submit_datetime",
"fill_datetime",
]
]
new_order_row = new_order_row.append(
pd.Series(
[submit_minus_generated, filled_minus_submit],
index=["submit_minus_generated", "filled_minus_submit"],
)
)
return new_order_row
def delay_calculations_for_order_row(order_row):
submit_minus_generated = delay_calc(
order_row.parent_reference_datetime, order_row.submit_datetime
)
filled_minus_submit = delay_calc(
order_row.submit_datetime,
order_row.fill_datetime)
return submit_minus_generated, filled_minus_submit
def delay_calc(first_time, second_time):
if first_time is None or second_time is None:
return np.nan
time_diff = second_time - first_time
time_diff_seconds = time_diff.total_seconds()
if time_diff_seconds < 0:
return np.nan
return time_diff_seconds
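# A small sketch of the delay convention above: delays come back in seconds and
# a missing or out-of-order timestamp collapses to NaN. The timestamps used in
# the doctest are purely illustrative.
def _example_delay_calc():
    """
    >>> t0 = datetime.datetime(2021, 1, 4, 9, 30, 0)
    >>> t1 = datetime.datetime(2021, 1, 4, 9, 30, 12)
    >>> delay_calc(t0, t1)
    12.0
    >>> delay_calc(t1, t0)   # negative gap is treated as unknown
    nan
    """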
def create_raw_slippage_df(broker_orders):
raw_slippage_data_as_list = [
raw_slippage_row(
broker_orders.iloc[irow]) for irow in range(
len(broker_orders))]
raw_slippage_df = pd.concat(raw_slippage_data_as_list, axis=1)
raw_slippage_df = raw_slippage_df.transpose()
raw_slippage_df.index = broker_orders.index
return raw_slippage_df
def raw_slippage_row(order_row):
(
delay,
bid_ask,
execution,
versus_limit,
versus_parent_limit,
total_trading,
) = price_calculations_for_order_row(order_row)
new_order_row = copy(order_row)
new_order_row = new_order_row[
[
"instrument_code",
"strategy_name",
"trade",
"parent_reference_price",
"parent_limit_price",
"mid_price",
"side_price",
"offside_price",
"limit_price",
"filled_price",
]
]
new_order_row = new_order_row.append(
pd.Series(
[
delay,
bid_ask,
execution,
versus_limit,
versus_parent_limit,
total_trading,
],
index=[
"delay",
"bid_ask",
"execution",
"versus_limit",
"versus_parent_limit",
"total_trading",
],
)
)
return new_order_row
def price_calculations_for_order_row(order_row):
buying_multiplier = order_row.buy_or_sell
# Following are always floats: parent_reference_price, limit_price,
# calculated_mid_price, calculated_side_price, fill_price
delay = price_slippage(
buying_multiplier,
order_row.parent_reference_price,
order_row.mid_price,
)
bid_ask = price_slippage(
buying_multiplier,
order_row.mid_price,
order_row.side_price,
)
execution = price_slippage(
buying_multiplier,
order_row.side_price,
order_row.filled_price,
)
total_trading = bid_ask + execution
versus_limit = price_slippage(
buying_multiplier,
order_row.limit_price,
order_row.filled_price)
versus_parent_limit = price_slippage(
buying_multiplier,
order_row.parent_limit_price,
order_row.filled_price,
)
return delay, bid_ask, execution, versus_limit, versus_parent_limit, total_trading
def price_slippage(buying_multiplier, first_price, second_price):
    # Slippage is negative when adverse (bad) and positive when favourable (good)
# This will return a negative number if second price is adverse versus
# first price
if first_price is None or second_price is None:
return np.nan
# 1 if buying, -1 if selling
# if buying, want second price to be lower than first
# if selling, want second price to be higher than first
slippage = buying_multiplier * (first_price - second_price)
return slippage
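# A minimal sketch of the sign convention implemented above; the prices in the
# doctest are hypothetical.
def _example_price_slippage():
    """Adverse fills come out negative, favourable fills positive.
    >>> price_slippage(1, 100.0, 100.5)    # buy filled half a point higher
    -0.5
    >>> price_slippage(-1, 100.0, 100.5)   # sell filled half a point higher
    0.5
    """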
def create_cash_slippage_df(raw_slippage, data):
# What does this slippage mean in money terms
cash_slippage_data_as_list = [
cash_slippage_row(raw_slippage.iloc[irow], data)
for irow in range(len(raw_slippage))
]
cash_slippage_df = pd.concat(cash_slippage_data_as_list, axis=1)
cash_slippage_df = cash_slippage_df.transpose()
cash_slippage_df.index = raw_slippage.index
return cash_slippage_df
def cash_slippage_row(slippage_row, data):
# rewrite
(
delay_cash,
bid_ask_cash,
execution_cash,
versus_limit_cash,
versus_parent_limit_cash,
total_trading_cash,
value_of_price_point,
) = cash_calculations_for_slippage_row(slippage_row, data)
new_slippage_row = copy(slippage_row)
new_slippage_row = new_slippage_row[
[
"instrument_code",
"strategy_name",
"trade",
]
]
new_slippage_row = new_slippage_row.append(
pd.Series(
[
value_of_price_point,
delay_cash,
bid_ask_cash,
execution_cash,
versus_limit_cash,
versus_parent_limit_cash,
total_trading_cash,
],
index=[
"value_of_price_point",
"delay_cash",
"bid_ask_cash",
"execution_cash",
"versus_limit_cash",
"versus_parent_limit_cash",
"total_trading_cash",
],
)
)
return new_slippage_row
def cash_calculations_for_slippage_row(slippage_row, data):
# What's a tick worth in base currency?
diag_instruments = diagInstruments(data)
value_of_price_point = diag_instruments.get_point_size_base_currency(
slippage_row.instrument_code
)
input_items = [
"delay",
"bid_ask",
"execution",
"versus_limit",
"versus_parent_limit",
"total_trading",
]
output = [value_of_price_point * slippage_row[input_name]
for input_name in input_items]
return tuple(output + [value_of_price_point])
def create_vol_norm_slippage_df(raw_slippage, data):
# What does this slippage mean in vol normalised terms
for irow in range(len(raw_slippage)):
vol_slippage_row(raw_slippage.iloc[irow], data)
vol_slippage_data_as_list = [
vol_slippage_row(raw_slippage.iloc[irow], data)
for irow in range(len(raw_slippage))
]
vol_slippage_df = pd.concat(vol_slippage_data_as_list, axis=1)
vol_slippage_df = vol_slippage_df.transpose()
vol_slippage_df.index = raw_slippage.index
return vol_slippage_df
def vol_slippage_row(slippage_row, data):
# rewrite
(
vol_delay,
vol_bid_ask,
vol_execution,
vol_versus_limit,
vol_versus_parent_limit,
total_trading_vol,
last_annual_vol,
) = vol_calculations_for_slippage_row(slippage_row, data)
new_slippage_row = copy(slippage_row)
new_slippage_row = new_slippage_row[
[
"instrument_code",
"strategy_name",
"trade",
]
]
new_slippage_row = new_slippage_row.append(
pd.Series(
[
last_annual_vol,
vol_delay,
vol_bid_ask,
vol_execution,
vol_versus_limit,
vol_versus_parent_limit,
total_trading_vol,
],
index=[
"last_annual_vol",
"delay_vol",
"bid_ask_vol",
"execution_vol",
"versus_limit_vol",
"versus_parent_limit_vol",
"total_trading_vol",
],
)
)
return new_slippage_row
def vol_calculations_for_slippage_row(slippage_row, data):
last_annual_vol = get_last_annual_vol_for_slippage_row(slippage_row, data)
input_items = [
"delay",
"bid_ask",
"execution",
"versus_limit",
"versus_parent_limit",
"total_trading",
]
output = [10000 * slippage_row[input_name] /
last_annual_vol for input_name in input_items]
return tuple(output + [last_annual_vol])
def get_last_annual_vol_for_slippage_row(slippage_row, data):
instrument_code = slippage_row.instrument_code
last_annual_vol = get_current_annualised_stdev_for_instrument(data,
instrument_code)
return last_annual_vol
def get_stats_for_slippage_groups(df_to_process, item_list):
results = {}
for item_name in item_list:
sum_data = df_to_process.groupby(
["strategy_name", "instrument_code"]).agg({item_name: "sum"})
count_data = df_to_process.groupby(
["strategy_name", "instrument_code"]).agg({item_name: "count"})
avg_data = sum_data / count_data
try:
std = df_to_process.groupby(
["strategy_name", "instrument_code"]).agg({item_name: "std"})
except pd.core.base.DataError:
# not enough items to calculate standard deviation
std = np.nan
lower_range = avg_data + (-2 * std)
upper_range = avg_data + (2 * std)
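        # roughly a 95% interval around the mean, assuming approximately normal slippage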
results[item_name + " Sum"] = sum_data
results[item_name + " Count"] = count_data
results[item_name + " Mean"] = avg_data
results[item_name + " Lower range"] = lower_range
results[item_name + " Upper range"] = upper_range
total_sum_data = df_to_process.groupby(["strategy_name"]).agg(
{item_name: "sum"}
)
results[item_name + " Total Sum"] = total_sum_data
return results
| gpl-3.0 |
alekz112/statsmodels | statsmodels/formula/tests/test_formula.py | 29 | 4647 | from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
[0, 0 , 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
| bsd-3-clause |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/site-packages/numpy/lib/twodim_base.py | 2 | 27339 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
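# For example, _min_int(0, 200) returns int16 (200 overflows int8), while
# _min_int(-5, 100) fits in int8.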
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
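# Editorial note (not part of the NumPy source): the flat-index arithmetic
# above places element i of the flattened input at (row, col) = (i, i + k)
# when k >= 0 (fi = i*n + i + k) and at (i - k, i) when k < 0, which is how
# the docstring examples are produced, e.g.:
#
#   >>> np.diagflat([1, 2], -1)
#   array([[0, 0, 0],
#          [1, 0, 0],
#          [0, 2, 0]])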
@set_module('numpy')
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
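# Editorial note (not part of the NumPy source): ``greater_equal.outer`` above
# builds the boolean mask directly, since m[i, j] = (i >= j - k), which is
# equivalent to j <= i + k -- ones on and below the k-th diagonal.  A minimal
# check, assuming the public API:
#
#   >>> np.tri(3, k=0, dtype=int)
#   array([[1, 0, 0],
#          [1, 1, 0],
#          [1, 1, 1]])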
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
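# Editorial note (not part of the NumPy source): ``triu`` reuses ``tri`` with
# ``k - 1`` so the mask covers everything strictly below the requested
# diagonal, which ``where(mask, 0, m)`` then zeroes; ``tril`` above keeps the
# masked region instead of discarding it.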
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
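# Editorial note (not part of the NumPy source): the ``multiply.accumulate``
# call fills the powers column by column -- after ``tmp[:, 0] = 1`` and
# ``tmp[:, 1:] = x[:, None]``, the running product turns column j into x**j,
# and the ``v[:, ::-1]`` view reverses the order when ``increasing`` is False.
# For example (assuming the documented behaviour):
#
#   >>> np.vander([2, 3], 3, increasing=True)
#   array([[1, 2, 4],
#          [1, 3, 9]])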
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
kaichogami/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
versae/DH2304 | data/arts1.py | 1 | 1038 | import numpy as np
import pandas as pd
arts = pd.DataFrame()
# Clean the dates so you only see numbers.
def clean_years(value):
result = value
chars_to_replace = ["c.", "©", ", CARCC", "no date", "n.d.", " SODRAC", ", CA", " CARCC", ""]
chars_to_split = ["-", "/"]
if isinstance(result, str):
for char in chars_to_split:
if char in result:
result = result.split(char)[1].strip()
for char in chars_to_replace:
result = result.replace(char, "")
if result == "":
return np.nan
else:
return int(result)
else:
return result
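# Editorial note (not part of the original submission): a few examples of what
# clean_years is expected to return, traced from the logic above:
#   clean_years("1978-1980") -> 1980   (keeps the part after the split char)
#   clean_years("c. 1978")   -> 1978   (strips the "c." prefix)
#   clean_years("n.d.")      -> nan    (empty after cleaning)
# Note that `arts` is created empty above; in the original notebook it is
# presumably populated from the CORGIS art dataset (e.g. via pd.read_csv)
# before the apply() calls below will work -- that loading step is assumed
# here, not shown.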
arts['execution_date'] = arts['execution_date'].apply(clean_years)
arts.head()
# If a year is lower than 100, then is referred to 1900. For example, 78 is actually 1978, and that needs to be fixed too.
def clean_year_99(value):
if value < 100:
return value + 1900
else:
return value
arts["execution_date"] = arts["execution_date"].apply(clean_year_99)
arts.head()
| mit |
john5223/airflow | airflow/hooks/hive_hooks.py | 17 | 14064 | from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import csv
import logging
import subprocess
from tempfile import NamedTemporaryFile
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
import pyhs2
from airflow.utils import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils import TemporaryDirectory
class HiveCliHook(BaseHook):
"""
Simple wrapper around the hive CLI.
It also supports the ``beeline``
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default"):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.conn = conn
def run_cli(self, hql, schema=None):
"""
Run an hql statement using the hive cli
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql)
f.flush()
fname = f.name
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = (
"jdbc:hive2://"
"{0}:{1}/{2}"
";auth=noSasl"
).format(conn.host, conn.port, conn.schema)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_cmd = [hive_bin, '-f', fname] + cmd_extra
if self.hive_cli_params:
hive_params_list = self.hive_cli_params.split()
hive_cmd.extend(hive_params_list)
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, ''):
stdout += line
logging.info(line.strip())
sp.wait()
if sp.returncode:
                    raise AirflowException(stdout)
return stdout
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.kill()
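# Editorial usage sketch (not part of the original module; the connection id,
# file path and table below are illustrative assumptions): staging a local
# CSV into a partitioned Hive table with HiveCliHook.load_file.
#
#   hh = HiveCliHook(hive_cli_conn_id='hive_cli_default')
#   hh.load_file(
#       filepath='/tmp/babynames.csv',                  # hypothetical path
#       table='airflow.static_babynames_partitioned',   # hypothetical table
#       field_dict={'state': 'STRING', 'year': 'INT'},
#       delimiter=',',
#       partition={'ds': '2015-01-01'},
#       recreate=True)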
class HiveMetastoreHook(BaseHook):
'''
Wrapper to interact with the Hive Metastore
'''
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thirft hive client not
# being pickable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
'''
Returns a Hive thrift client.
'''
ms = self.metastore_conn
transport = TSocket.TSocket(ms.host, ms.port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
'''
Checks whether a partition exists
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
'''
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def get_table(self, table_name, db='default'):
'''
Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
'''
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
'''
Get a metastore table object
'''
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
'''
Get a metastore table object
'''
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
'''
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
For subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
'''
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
'''
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
table, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
'''
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
class HiveServer2Hook(BaseHook):
'''
Wrapper around the pyhs2 library
Note that the default authMechanism is NOSASL, to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "PLAIN"}``. Refer to the pyhs2 for more details.
'''
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self):
db = self.get_connection(self.hiveserver2_conn_id)
return pyhs2.connect(
host=db.host,
port=db.port,
authMechanism=db.extra_dejson.get('authMechanism', 'NOSASL'),
user=db.login,
database=db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
with self.get_conn() as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
for statement in hql:
with conn.cursor() as cur:
cur.execute(statement)
records = cur.fetchall()
if records:
results = {
'data': records,
'header': cur.getSchema(),
}
return results
def to_csv(self, hql, csv_filepath, schema='default'):
schema = schema or 'default'
with self.get_conn() as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.getSchema()
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow([c['columnName'] for c in cur.getSchema()])
i = 0
while cur.hasMoreRows:
rows = [row for row in cur.fetchmany() if row]
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
'''
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
'''
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
'''
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
'''
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c['columnName'] for c in res['header']]
return df
| apache-2.0 |
ningchi/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
JamesDickenson/aima-python | submissions/aartiste/myNN.py | 4 | 3659 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The multi-layer perceptron used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
mlpc = MLPClassifier(
solver='sgd',
learning_rate = 'adaptive',
)
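# Editorial sketch (not part of the original submission): one way the two
# configurations below could be evaluated, assuming scikit-learn's
# train_test_split is available; the split ratio is an arbitrary choice.
#
#   from sklearn.model_selection import train_test_split
#   X_train, X_test, y_train, y_test = train_test_split(
#       trumpECHP.data, trumpECHP.target, test_size=0.25, random_state=0)
#   mlpc.fit(X_train, y_train)
#   print(mlpc.score(X_test, y_test))  # mean accuracy on held-out counties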
Examples = {
'TrumpDefault': {
'frame': trumpECHP,
},
'TrumpSGD': {
'frame': trumpECHP,
'mlpc': mlpc
},
} | mit |
breznak/NAB | nab/labeler.py | 8 | 16181 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import itertools
import numpy
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.util import (absoluteFilePaths,
getProbationPeriod,
strf,
strp,
deepmap,
createPath,
writeJSON)
def bucket(rawTimes, buffer):
"""
Buckets (groups) timestamps that are within the amount of time specified by
buffer.
"""
bucket = []
rawBuckets = []
current = None
for t in rawTimes:
if current is None:
current = t
bucket = [current]
continue
if (t - current) <= buffer:
bucket.append(t)
else:
rawBuckets.append(bucket)
current = t
bucket = [current]
if bucket:
rawBuckets.append(bucket)
return rawBuckets
def merge(rawBuckets, threshold):
"""
Merges bucketed timestamps into one timestamp (most frequent, or earliest).
"""
truths = []
passed = []
for bucket in rawBuckets:
if len(bucket) >= threshold:
truths.append(max(bucket, key=bucket.count))
else:
passed.append(bucket)
return truths, passed
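# Editorial example (not part of the original NAB source): with integers
# standing in for timestamps and a buffer of 3, bucket() groups labels that
# are close together and merge() keeps only buckets labeled by enough users:
#
#   bucket([1, 2, 10, 11, 12], 3)     -> [[1, 2], [10, 11, 12]]
#   merge([[1, 2], [10, 11, 12]], 3)  -> ([10], [[1, 2]])
#
# i.e. the second bucket meets the threshold of 3 labels and collapses to a
# single ground-truth timestamp, while the first bucket is passed over.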
def checkForOverlap(labels, buffer, labelsFileName, dataFileName):
"""
  Raise a ValueError if the difference between any two consecutive labels is
  less than or equal to the buffer.
"""
for i in xrange(len(labels)-1):
if labels[i+1] - labels[i] <= buffer:
raise ValueError("The labels {} and {} in \'{}\' labels for data file "
"\'{}\' are too close to each other to be considered distinct "
"anomalies. Please relabel."
.format(labels[i], labels[i+1], labelsFileName, dataFileName))
class CorpusLabel(object):
"""
Class to store and manipulate a single set of labels for the whole
benchmark corpus.
"""
def __init__(self, path, corpus):
"""
Initializes a CorpusLabel object by getting the anomaly windows and labels.
When this is done for combining raw user labels, we skip getLabels()
because labels are not yet created.
@param path (string) Name of file containing the set of labels.
@param corpus (nab.Corpus) Corpus object.
"""
self.path = path
self.windows = None
self.labels = None
self.corpus = corpus
self.getWindows()
if "raw" not in self.path:
# Do not get labels from files in the path nab/labels/raw
self.getLabels()
def getWindows(self):
"""
Read JSON label file. Get timestamps as dictionaries with key:value pairs of
a relative path and its corresponding list of windows.
"""
def found(t, data):
f = data["timestamp"][data["timestamp"] == pandas.tslib.Timestamp(t)]
exists = (len(f) == 1)
return exists
with open(os.path.join(self.path)) as windowFile:
windows = json.load(windowFile)
self.windows = {}
for relativePath in windows.keys():
self.windows[relativePath] = deepmap(strp, windows[relativePath])
if len(self.windows[relativePath]) == 0:
continue
data = self.corpus.dataFiles[relativePath].data
if "raw" in self.path:
timestamps = windows[relativePath]
else:
timestamps = list(itertools.chain.from_iterable(windows[relativePath]))
# Check that timestamps are present in dataset
if not all([found(t,data) for t in timestamps]):
raise ValueError("In the label file %s, one of the timestamps used for "
"the datafile %s doesn't match; it does not exist in "
"the file. Timestamps in json label files have to "
"exactly match timestamps in corresponding datafiles."
% (self.path, relativePath))
def validateLabels(self):
"""
This is run at the end of the label combining process (see
scripts/combine_labels.py) to validate the resulting ground truth windows,
specifically that they are distinct (unique, non-overlapping).
"""
with open(os.path.join(self.path)) as windowFile:
windows = json.load(windowFile)
self.windows = {}
for relativePath in windows.keys():
self.windows[relativePath] = deepmap(strp, windows[relativePath])
if len(self.windows[relativePath]) == 0:
continue
num_windows = len(self.windows[relativePath])
if num_windows > 1:
if not all([(self.windows[relativePath][i+1][0]
- self.windows[relativePath][i][1]).total_seconds() >= 0
for i in xrange(num_windows-1)]):
raise ValueError("In the label file %s, windows overlap." % self.path)
def getLabels(self):
"""
Get Labels as a dictionary of key-value pairs of a relative path and its
corresponding binary vector of anomaly labels. Labels are simply a more
verbose version of the windows.
"""
self.labels = {}
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if self.windows.has_key(relativePath):
windows = self.windows[relativePath]
labels = pandas.DataFrame({"timestamp": dataSet.data["timestamp"]})
labels['label'] = 0
for t1, t2 in windows:
moreThanT1 = labels[labels["timestamp"] >= t1]
betweenT1AndT2 = moreThanT1[moreThanT1["timestamp"] <= t2]
indices = betweenT1AndT2.loc[:,"label"].index
labels["label"].values[indices.values] = 1
self.labels[relativePath] = labels
else:
print "Warning: no label for datafile",relativePath
class LabelCombiner(object):
"""
This class is used to combine labels from multiple human labelers, and the set
of manual labels (known anomalies).
The output is a single ground truth label file containing anomalies where
there is enough human agreement. The class also computes the window around
each anomaly. The exact logic is described elsewhere in the NAB
documentation.
"""
def __init__(self, labelDir, corpus,
threshold, windowSize,
probationaryPercent, verbosity):
"""
@param labelDir (string) A directory name containing user label files.
This directory should contain one label file
per human labeler.
@param corpus (Corpus) Instance of Corpus class.
@param threshold (float) A percentage between 0 and 1, specifying the
agreement threshold. It describes the level
of agreement needed between individual
labelers before a particular point in a
data file is labeled as anomalous in the
combined file.
@param windowSize (float) Estimated size of an anomaly window, as a
ratio the dataset length.
@param verbosity (int) 0, 1, or 2 to print out select labeling
metrics; 0 is none, 2 is the most.
"""
self.labelDir = labelDir
self.corpus = corpus
self.threshold = threshold
self.windowSize = windowSize
self.probationaryPercent = probationaryPercent
self.verbosity = verbosity
self.userLabels = None
self.nLabelers = None
self.knownLabels = None
self.combinedWindows = None
def __str__(self):
ans = ""
ans += "labelDir: %s\n" % self.labelDir
ans += "corpus: %s\n" % self.corpus
ans += "number of labelers: %d\n" % self.nLabelers
ans += "agreement threshold: %d\n" % self.threshold
return ans
def write(self, labelsPath, windowsPath):
"""Write the combined labels and windows to destination directories."""
if not os.path.isdir(labelsPath):
createPath(labelsPath)
if not os.path.isdir(windowsPath):
createPath(windowsPath)
writeJSON(labelsPath, self.labelTimestamps)
writeJSON(windowsPath, self.combinedWindows)
def combine(self):
"""Combine raw and known labels in anomaly windows."""
self.getRawLabels()
self.combineLabels()
self.editPoorLabels()
self.applyWindows()
self.checkWindows()
def getRawLabels(self):
"""Collect the raw user labels from specified directory."""
labelPaths = absoluteFilePaths(self.labelDir)
self.userLabels = []
self.knownLabels = []
for path in labelPaths:
if "known" in path:
self.knownLabels.append(CorpusLabel(path, self.corpus))
else:
self.userLabels.append(CorpusLabel(path, self.corpus))
self.nLabelers = len(self.userLabels)
if self.nLabelers == 0:
raise ValueError("No users labels found")
def combineLabels(self):
"""
Combines raw user labels to create set of true anomaly labels.
A buffer is used to bucket labels that identify the same anomaly. The buffer
is half the estimated window size of an anomaly -- approximates an average
of two anomalies per dataset, and no window can have > 1 anomaly.
After bucketing, a label becomes a true anomaly if it was labeled by a
proportion of the users greater than the defined threshold. Then the bucket
is merged into one timestamp -- the ground truth label.
The set of known anomaly labels are added as well. These have been manually
labeled because we know the direct causes of the anomalies. They are added
as if they are the result of the bucket-merge process.
If verbosity > 0, the dictionary passedLabels -- the raw labels that did not
pass the threshold qualification -- is printed to the console.
"""
def setTruthLabels(dataSet, trueAnomalies):
"""Returns the indices of the ground truth anomalies for a data file."""
timestamps = dataSet.data["timestamp"]
labels = numpy.array(timestamps.isin(trueAnomalies), dtype=int)
return [i for i in range(len(labels)) if labels[i]==1]
self.labelTimestamps = {}
self.labelIndices = {}
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if ("Known" in relativePath) or ("artificial" in relativePath):
knownAnomalies = self.knownLabels[0].windows[relativePath]
self.labelTimestamps[relativePath] = [str(t) for t in knownAnomalies]
self.labelIndices[relativePath] = setTruthLabels(dataSet, knownAnomalies)
continue
# Calculate the window buffer -- used for bucketing labels identifying
# the same anomaly.
granularity = dataSet.data["timestamp"][1] - dataSet.data["timestamp"][0]
buffer = datetime.timedelta(minutes=
granularity.total_seconds()/60 * len(dataSet.data) * self.windowSize/10)
rawTimesLists = []
userCount = 0
for user in self.userLabels:
if relativePath in user.windows:
# the user has labels for this file
checkForOverlap(
user.windows[relativePath], buffer, user.path, relativePath)
rawTimesLists.append(user.windows[relativePath])
userCount += 1
if not rawTimesLists:
# no labeled anomalies for this data file
self.labelTimestamps[relativePath] = []
self.labelIndices[relativePath] = setTruthLabels(dataSet, [])
continue
else:
rawTimes = list(itertools.chain.from_iterable(rawTimesLists))
rawTimes.sort()
# Bucket and merge the anomaly timestamps.
threshold = userCount * self.threshold
trueAnomalies, passedAnomalies = merge(
bucket(rawTimes, buffer), threshold)
self.labelTimestamps[relativePath] = [str(t) for t in trueAnomalies]
self.labelIndices[relativePath] = setTruthLabels(dataSet, trueAnomalies)
if self.verbosity>0:
print "----"
print "For %s the passed raw labels and qualified true labels are,"\
" respectively:" % relativePath
print passedAnomalies
print trueAnomalies
return self.labelTimestamps, self.labelIndices
def editPoorLabels(self):
"""
This edits labels that have been flagged for manual revision. From
inspecting the data and anomaly windows, we have determined some combined
labels should be revised, or not included in the ground truth labels.
"""
count = 0
for relativePath, indices in self.labelIndices.iteritems():
if "iio_us-east-1_i-a2eb1cd9_NetworkIn" in relativePath:
self.labelIndices[relativePath] = [249, 339]
count += len(indices)
if self.verbosity > 0:
print "============================================================="
print "Total ground truth anomalies in benchmark dataset =", count
def applyWindows(self):
"""
This takes all the true anomalies, as calculated by combineLabels(), and
adds a standard window. The window length is the class variable windowSize,
and the location is centered on the anomaly timestamp.
If verbosity = 2, the window metrics are printed to the console.
"""
allWindows = {}
for relativePath, anomalies in self.labelIndices.iteritems():
data = self.corpus.dataFiles[relativePath].data
length = len(data)
num = len(anomalies)
if num:
windowLength = int(self.windowSize * length / len(anomalies))
else:
windowLength = int(self.windowSize * length)
if self.verbosity==2:
print "----"
print "Window metrics for file", relativePath
print "file length =", length, ";" \
"number of windows =", num, ";" \
"window length =", windowLength
windows = []
for a in anomalies:
front = max(a - windowLength/2, 0)
back = min(a + windowLength/2, length-1)
windowLimit = [strf(data["timestamp"][front]),
strf(data["timestamp"][back])]
windows.append(windowLimit)
allWindows[relativePath] = windows
self.combinedWindows = allWindows
def checkWindows(self):
"""
This takes the anomaly windows and checks for overlap with both each other
and with the probationary period. Overlapping windows are merged into a
single window. Windows overlapping with the probationary period are deleted.
"""
for relativePath, windows in self.combinedWindows.iteritems():
numWindows = len(windows)
if numWindows > 0:
fileLength = self.corpus.dataFiles[relativePath].data.shape[0]
probationIndex = getProbationPeriod(
self.probationaryPercent, fileLength)
probationTimestamp = self.corpus.dataFiles[relativePath].data[
"timestamp"][probationIndex]
if (pandas.to_datetime(windows[0][0])
-probationTimestamp).total_seconds() < 0:
del windows[0]
print ("The first window in {} overlaps with the probationary period "
", so we're deleting it.".format(relativePath))
i = 0
while len(windows)-1 > i:
if (pandas.to_datetime(windows[i+1][0])
- pandas.to_datetime(windows[i][1])).total_seconds() <= 0:
# merge windows
windows[i] = [windows[i][0], windows[i+1][1]]
del windows[i+1]
i += 1
| agpl-3.0 |
skoslowski/gnuradio | gr-filter/examples/fir_filter_fff.py | 3 | 3371 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print("Num. Taps: ", len(taps))
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=10000,
help="Number of samples to process [default=%(default)r]")
parser.add_argument("-s", "--samplerate", type=eng_float, default=8000,
help="System sample rate [default=%(default)r]")
parser.add_argument("-B", "--bandwidth", type=eng_float, default=1000,
help="Filter bandwidth [default=%(default)r]")
parser.add_argument("-T", "--transition", type=eng_float, default=100,
help="Transition band [default=%(default)r]")
parser.add_argument("-A", "--attenuation", type=eng_float, default=80,
help="Stopband attenuation [default=%(default)r]")
parser.add_argument("-D", "--decimation", type=int, default=1,
help="Decmation factor [default=%(default)r]")
args = parser.parse_args()
put = example_fir_filter_fff(args.nsamples,
args.samplerate,
args.bandwidth,
args.transition,
args.attenuation,
args.decimation)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_snk = numpy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
f2 = pyplot.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
kayak/fireant | fireant/queries/builder/dimension_latest_query_builder.py | 1 | 2026 | import pandas as pd
from fireant.dataset.fields import Field
from fireant.utils import (
alias_for_alias_selector,
immutable,
)
from fireant.queries.builder.query_builder import QueryBuilder, QueryException, add_hints
from fireant.queries.execution import fetch_data
class DimensionLatestQueryBuilder(QueryBuilder):
def __init__(self, dataset):
super().__init__(dataset)
@immutable
def __call__(self, dimension: Field, *dimensions: Field):
self._dimensions += [dimension] + list(dimensions)
@property
def sql(self):
"""
        Serializes this query builder as a set of SQL queries. This method will always return a list of one
        query, since only one query is required to retrieve the latest dimension values.
        This function only handles dimensions and filtering, which is everything needed for the query to
        fetch the latest values of the selected dimensions.
The dataset query extends this with metrics, references, and totals.
"""
if not self.dimensions:
raise QueryException("Must select at least one dimension to query latest values")
query = self.dataset.database.make_latest_query(
base_table=self.table,
joins=self.dataset.joins,
dimensions=self.dimensions,
)
return [query]
def fetch(self, hint=None):
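        # Run the generated latest-value query (with an optional hint), then collapse
        # the resulting frame to a single Series of latest dimension values.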
queries = add_hints(self.sql, hint)
max_rows_returned, data = fetch_data(self.dataset.database, queries, self.dimensions)
data = self._get_latest_data_from_df(data)
return self._transform_for_return(data, max_rows_returned=max_rows_returned)
def _get_latest_data_from_df(self, df: pd.DataFrame) -> pd.Series:
latest = df.reset_index().iloc[0]
# Remove the row index as the name and trim the special dimension key characters from the dimension key
latest.name = None
latest.index = [alias_for_alias_selector(alias) for alias in latest.index]
return latest
| apache-2.0 |
anomam/pvlib-python | pvlib/tests/test_bifacial.py | 2 | 5958 | import pandas as pd
import numpy as np
from datetime import datetime
from pvlib.bifacial import pvfactors_timeseries, PVFactorsReportBuilder
from conftest import requires_pvfactors
import pytest
@requires_pvfactors
@pytest.mark.parametrize('run_parallel_calculations',
[False, True])
def test_pvfactors_timeseries(run_parallel_calculations):
""" Test that pvfactors is functional, using the TLDR section inputs of the
package github repo README.md file:
https://github.com/SunPower/pvfactors/blob/master/README.md#tldr---quick-start"""
# Create some inputs
timestamps = pd.DatetimeIndex([datetime(2017, 8, 31, 11),
datetime(2017, 8, 31, 12)]
).set_names('timestamps')
solar_zenith = [20., 10.]
solar_azimuth = [110., 140.]
surface_tilt = [10., 0.]
surface_azimuth = [90., 90.]
axis_azimuth = 0.
dni = [1000., 300.]
dhi = [50., 500.]
gcr = 0.4
pvrow_height = 1.75
pvrow_width = 2.44
albedo = 0.2
n_pvrows = 3
index_observed_pvrow = 1
rho_front_pvrow = 0.03
rho_back_pvrow = 0.05
horizon_band_angle = 15.
# Expected values
expected_ipoa_front = pd.Series([1034.95474708997, 795.4423259036623],
index=timestamps,
name=('total_inc_front'))
expected_ipoa_back = pd.Series([91.88707460262768, 78.05831585685215],
index=timestamps,
name=('total_inc_back'))
# Run calculation
ipoa_front, ipoa_back = pvfactors_timeseries(
solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
axis_azimuth,
timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
n_pvrows=n_pvrows, index_observed_pvrow=index_observed_pvrow,
rho_front_pvrow=rho_front_pvrow, rho_back_pvrow=rho_back_pvrow,
horizon_band_angle=horizon_band_angle,
run_parallel_calculations=run_parallel_calculations,
n_workers_for_parallel_calcs=-1)
pd.testing.assert_series_equal(ipoa_front, expected_ipoa_front)
pd.testing.assert_series_equal(ipoa_back, expected_ipoa_back)
@requires_pvfactors
@pytest.mark.parametrize('run_parallel_calculations',
[False, True])
def test_pvfactors_timeseries_pandas_inputs(run_parallel_calculations):
""" Test that pvfactors is functional, using the TLDR section inputs of the
package github repo README.md file, but converted to pandas Series:
https://github.com/SunPower/pvfactors/blob/master/README.md#tldr---quick-start"""
# Create some inputs
timestamps = pd.DatetimeIndex([datetime(2017, 8, 31, 11),
datetime(2017, 8, 31, 12)]
).set_names('timestamps')
solar_zenith = pd.Series([20., 10.])
solar_azimuth = pd.Series([110., 140.])
surface_tilt = pd.Series([10., 0.])
surface_azimuth = pd.Series([90., 90.])
axis_azimuth = 0.
dni = pd.Series([1000., 300.])
dhi = pd.Series([50., 500.])
gcr = 0.4
pvrow_height = 1.75
pvrow_width = 2.44
albedo = 0.2
n_pvrows = 3
index_observed_pvrow = 1
rho_front_pvrow = 0.03
rho_back_pvrow = 0.05
horizon_band_angle = 15.
# Expected values
expected_ipoa_front = pd.Series([1034.95474708997, 795.4423259036623],
index=timestamps,
name=('total_inc_front'))
expected_ipoa_back = pd.Series([91.88707460262768, 78.05831585685215],
index=timestamps,
name=('total_inc_back'))
# Run calculation
ipoa_front, ipoa_back = pvfactors_timeseries(
solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
axis_azimuth,
timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
n_pvrows=n_pvrows, index_observed_pvrow=index_observed_pvrow,
rho_front_pvrow=rho_front_pvrow, rho_back_pvrow=rho_back_pvrow,
horizon_band_angle=horizon_band_angle,
run_parallel_calculations=run_parallel_calculations,
n_workers_for_parallel_calcs=-1)
pd.testing.assert_series_equal(ipoa_front, expected_ipoa_front)
pd.testing.assert_series_equal(ipoa_back, expected_ipoa_back)
def test_build_1():
"""Test that build correctly instantiates a dictionary, when passed a Nones
for the report and pvarray arguments.
"""
report = None
pvarray = None
expected = {'total_inc_back': [np.nan], 'total_inc_front': [np.nan]}
assert expected == PVFactorsReportBuilder.build(report, pvarray)
def test_merge_1():
"""Test that merge correctly returns the first element of the reports
    argument when there is only one dictionary in reports.
"""
test_dict = {'total_inc_back': [1, 2, 3], 'total_inc_front': [4, 5, 6]}
reports = [test_dict]
assert test_dict == PVFactorsReportBuilder.merge(reports)
def test_merge_2():
"""Test that merge correctly combines two dictionary reports.
"""
test_dict_1 = {'total_inc_back': [1, 2], 'total_inc_front': [4, 5]}
test_dict_2 = {'total_inc_back': [3], 'total_inc_front': [6]}
expected = {'total_inc_back': [1, 2, 3], 'total_inc_front': [4, 5, 6]}
reports = [test_dict_1, test_dict_2]
assert expected == PVFactorsReportBuilder.merge(reports)
def test_merge_3():
"""Test that merge correctly combines three dictionary reports.
"""
test_dict_1 = {'total_inc_back': [1], 'total_inc_front': [4]}
test_dict_2 = {'total_inc_back': [2], 'total_inc_front': [5]}
test_dict_3 = {'total_inc_back': [3], 'total_inc_front': [6]}
expected = {'total_inc_back': [1, 2, 3], 'total_inc_front': [4, 5, 6]}
reports = [test_dict_1, test_dict_2, test_dict_3]
assert expected == PVFactorsReportBuilder.merge(reports)
| bsd-3-clause |
sumspr/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
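# Time scikit-learn's AgglomerativeClustering (ward linkage) and scipy's hierarchy.ward
# on the same random data for every (n_samples, n_features) combination.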
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/groupby/test_function.py | 3 | 38905 | import pytest
import numpy as np
import pandas as pd
from pandas import (DataFrame, Index, compat, isna,
Series, MultiIndex, Timestamp, date_range)
from pandas.errors import UnsupportedFunctionCall
from pandas.util import testing as tm
import pandas.core.nanops as nanops
from string import ascii_lowercase
from pandas.compat import product as cart_product
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(compat.builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
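    # The Python builtin sum passed to agg/apply should be intercepted and produce
    # the same result as the optimized grouped.sum().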
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(compat.builtins.sum)
result2 = grouped.apply(compat.builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int',
'timedelta'])
for attr in ['cumsum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, 'baz']],
columns=['A', 'B', 'C'])
g = df.groupby('A')
gni = df.groupby('A', as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[0., 0.], [0, np.nan]], columns=['A', 'B'],
index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name='A')
expected_col = pd.MultiIndex(levels=[['B'],
['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max']],
labels=[[0] * 8, list(range(8))])
expected = pd.DataFrame([[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]],
index=expected_index,
columns=expected_col)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat([df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T])
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame([[True, True], [False, True]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=['B'], index=[1, 3])
expected.index.name = 'A'
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame(
[[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]
], columns=['A', 'B', 'C'])
expected = DataFrame(
[[2, np.nan], [np.nan, 9], [4, 9]], columns=['B', 'C'])
result = df.groupby('A').cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby('A', as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby('A').cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby('A').cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
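    # The Cython groupby median should agree with aggregating via nanops.nanmedian,
    # even with NaNs injected into both the values and the group labels.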
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
'int8', 'int16', 'int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize("method,data", [
('first', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('last', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('min', {'df': [{'a': 1, 'b': 1}, {'a': 2, 'b': 3}]}),
('max', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}]}),
('nth', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 4}],
'args': [1]}),
('count', {'df': [{'a': 1, 'b': 2}, {'a': 2, 'b': 2}],
'out_type': 'int64'})
])
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{'a': 1, 'b': 1},
{'a': 1, 'b': 2},
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}])
df['b'] = df.b.astype(dtype)
if 'args' not in data:
data['args'] = []
if 'out_type' in data:
out_type = data['out_type']
else:
out_type = dtype
exp = data['df']
df_out = pd.DataFrame(exp)
df_out['b'] = df_out.b.astype(out_type)
df_out.set_index('a', inplace=True)
grpd = df.groupby('a')
t = getattr(grpd, method)(*data['args'])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize("i", [
(Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448")),
(24650000000000001, 24650000000000002)
])
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1],
"args": [1]},
"count": {"expected": 2}}
for method, data in compat.iteritems(grp_exp):
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(index=pd.MultiIndex.from_product(
[['value1', 'value2'], date_range('2014-01-01', '2014-01-06')]),
columns=Index(
['1', '2'], name='id'))
df['1'] = [np.nan, 1, np.nan, np.nan, 11, np.nan, np.nan, 2, np.nan,
np.nan, 22, np.nan]
df['2'] = [np.nan, 3, np.nan, np.nan, 33, np.nan, np.nan, 4, np.nan,
np.nan, 44, np.nan]
expected = df.groupby(level=0, axis=0).fillna(method='ffill')
result = df.T.groupby(level=0, axis=1).fillna(method='ffill').T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({'key': ['b'] * 10, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({'key': ['b'] * 100, 'value': 2})
actual = df.groupby('key')['value'].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df['value'] = df['value'].astype(float)
expected = df.groupby('key')['value'].apply(lambda x: x.cumprod())
expected.name = 'value'
tm.assert_series_equal(actual, expected)
def test_ops_general():
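    # Each groupby reduction should match applying its numpy (or scipy.stats.sem)
    # counterpart through agg.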
ops = [('mean', np.mean),
('median', np.median),
('std', np.std),
('var', np.var),
('sum', np.sum),
('prod', np.prod),
('min', np.min),
('max', np.max),
('first', lambda x: x.iloc[0]),
('last', lambda x: x.iloc[-1]),
('count', np.size), ]
try:
from scipy.stats import sem
except ImportError:
pass
else:
ops.append(('sem', sem))
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
for op, targop in ops:
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
try:
tm.assert_frame_equal(result, expected)
except BaseException as exc:
exc.args += ('operation: %s' % op, )
raise
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(compat.StringIO(raw), parse_dates=[0])
gb = df.groupby('Date')
r = gb[['File']].max()
e = gb['File'].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r['File'].isna().any()
def test_nlargest():
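    # nlargest(3) should return the three largest values per group, indexed by
    # (group label, original position).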
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series([
7, 5, 3, 10, 9, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [3, 2, 1, 9, 5, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
3, 2, 1, 3, 3, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [2, 3, 1, 6, 5, 7]]))
tm.assert_series_equal(gb.nlargest(3, keep='last'), e)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list('a' * 5 + 'b' * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series([
1, 2, 3, 0, 4, 6
], index=MultiIndex.from_arrays([list('aaabbb'), [0, 4, 1, 6, 7, 8]]))
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series([
0, 1, 1, 0, 1, 2
], index=MultiIndex.from_arrays([list('aaabbb'), [4, 1, 0, 9, 8, 7]]))
tm.assert_series_equal(gb.nsmallest(3, keep='last'), e)
def test_numpy_compat():
# see gh-12811
df = pd.DataFrame({'A': [1, 2, 1], 'B': [1, 2, 3]})
g = df.groupby('A')
msg = "numpy operations are not valid with groupby"
for func in ('mean', 'var', 'std', 'cumprod', 'cumsum'):
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(g, func), 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(g, func), foo=1)
def test_cummin_cummax():
# GH 15048
num_types = [np.int32, np.int64, np.float32, np.float64]
num_mins = [np.iinfo(np.int32).min, np.iinfo(np.int64).min,
np.finfo(np.float32).min, np.finfo(np.float64).min]
num_max = [np.iinfo(np.int32).max, np.iinfo(np.int64).max,
np.finfo(np.float32).max, np.finfo(np.float64).max]
base_df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 2, 2],
'B': [3, 4, 3, 2, 2, 3, 2, 1]})
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
for dtype, min_val, max_val in zip(num_types, num_mins, num_max):
df = base_df.astype(dtype)
# cummin
expected = pd.DataFrame({'B': expected_mins}).astype(dtype)
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummin w/ min value for dtype
df.loc[[2, 6], 'B'] = min_val
expected.loc[[2, 3, 6, 7], 'B'] = min_val
result = df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# cummax
expected = pd.DataFrame({'B': expected_maxs}).astype(dtype)
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test cummax w/ max value for dtype
df.loc[[2, 6], 'B'] = max_val
expected.loc[[2, 3, 6, 7], 'B'] = max_val
result = df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], 'B'] = np.nan
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 2,
np.nan, 3, np.nan, 1]})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummin())
.to_frame())
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'B': [np.nan, 4, np.nan, 4,
np.nan, 3, np.nan, 3]})
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(result, expected)
expected = (base_df.groupby('A')
.B
.apply(lambda x: x.cummax())
.to_frame())
tm.assert_frame_equal(result, expected)
# Test nan in entire column
base_df['B'] = np.nan
expected = pd.DataFrame({'B': [np.nan] * 8})
result = base_df.groupby('A').cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby('A').B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(['2001'])))
expected = pd.Series(pd.to_datetime('2001'), index=[0], name='b')
for method in ['cummax', 'cummin']:
result = getattr(df.groupby('a')['b'], method)()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby('a').b.cummax()
expected = pd.Series([2, 1, 2], name='b')
tm.assert_series_equal(result, expected)
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby('a').b.cummin()
expected = pd.Series([1, 2, 1], name='b')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1],
[True, False, False, True]),
# Test with inf vals
([1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_increasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = (
df.groupby(['B']).C.apply(lambda x: x.is_monotonic_increasing))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('in_vals, out_vals', [
# Basics: strictly decreasing (T), strictly increasing (F),
    # abs val decreasing (F), non-strictly decreasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1],
[True, False, False, True]),
# Test with inf vals
([np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True]),
# Test with nan vals; should always be False
([1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False]),
])
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
'A': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'],
'B': ['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c', 'd', 'd'],
'C': in_vals}
df = pd.DataFrame(source_dict)
result = df.groupby('B').C.is_monotonic_decreasing
index = Index(list('abcd'), name='B')
expected = pd.Series(index=index, data=out_vals, name='C')
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level='first')
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result['mean'], grouped.mean(),
check_names=False)
tm.assert_series_equal(result['std'], grouped.std(), check_names=False)
tm.assert_series_equal(result['min'], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ['C']].groupby(df['A'])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == 'A'
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
labels=[[0] * len(group.columns), range(len(group.columns))])
group = pd.DataFrame(group.values,
columns=group_col,
index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({'A': 0, 'B': 0,
'C': 1, 'D': 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
labels=[[0, 0, 1, 1], range(len(expected.index))])
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame({'x': [1, 2, 3, 4, 5] * 3,
'y': [10, 20, 30, 40, 50] * 3,
'z': [100, 200, 300, 400, 500] * 3})
df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={'k': 'key'})
pytest.raises(ValueError, lambda: df1.groupby('k').describe())
pytest.raises(ValueError, lambda: df2.groupby('key').describe())
def test_frame_describe_unstacked_format():
# GH 4792
prices = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 24990,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 25499,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 25499}
volumes = {pd.Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
pd.Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
pd.Timestamp('2011-01-06 12:54:09', tz=None): 100000000}
df = pd.DataFrame({'PRICE': prices,
'VOLUME': volumes})
result = df.groupby('PRICE').VOLUME.describe()
data = [df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist()]
expected = pd.DataFrame(data,
index=pd.Index([24990, 25499], name='PRICE'),
columns=['count', 'mean', 'std', 'min',
'25%', '50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# nunique
# --------------------------------
@pytest.mark.parametrize('n', 10 ** np.arange(2, 6))
@pytest.mark.parametrize('m', [10, 100, 1000])
@pytest.mark.parametrize('sort', [False, True])
@pytest.mark.parametrize('dropna', [False, True])
def test_series_groupby_nunique(n, m, sort, dropna):
def check_nunique(df, keys, as_index=True):
gr = df.groupby(keys, as_index=as_index, sort=sort)
left = gr['julie'].nunique(dropna=dropna)
gr = df.groupby(keys, as_index=as_index, sort=sort)
right = gr['julie'].apply(Series.nunique, dropna=dropna)
if not as_index:
right = right.reset_index(drop=True)
tm.assert_series_equal(left, right, check_names=False)
days = date_range('2015-08-23', periods=10)
frame = DataFrame({'jim': np.random.choice(list(ascii_lowercase), n),
'joe': np.random.choice(days, n),
'julie': np.random.randint(0, m, n)})
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
frame.loc[1::17, 'jim'] = None
frame.loc[3::37, 'joe'] = None
frame.loc[7::19, 'julie'] = None
frame.loc[8::19, 'julie'] = None
frame.loc[9::19, 'julie'] = None
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
check_nunique(frame, ['jim'], as_index=False)
check_nunique(frame, ['jim', 'joe'], as_index=False)
def test_nunique():
df = DataFrame({
'A': list('abbacc'),
'B': list('abxacc'),
'C': list('abbacx'),
})
expected = DataFrame({'A': [1] * 3, 'B': [1, 2, 1], 'C': [1, 1, 2]})
result = df.groupby('A', as_index=False).nunique()
tm.assert_frame_equal(result, expected)
# as_index
expected.index = list('abc')
expected.index.name = 'A'
result = df.groupby('A').nunique()
tm.assert_frame_equal(result, expected)
# with na
result = df.replace({'x': None}).groupby('A').nunique(dropna=False)
tm.assert_frame_equal(result, expected)
# dropna
expected = DataFrame({'A': [1] * 3, 'B': [1] * 3, 'C': [1] * 3},
index=list('abc'))
expected.index.name = 'A'
result = df.replace({'x': None}).groupby('A').nunique()
tm.assert_frame_equal(result, expected)
def test_nunique_with_object():
# GH 11077
data = pd.DataFrame(
[[100, 1, 'Alice'],
[200, 2, 'Bob'],
[300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[500, 5, 'Edith']],
columns=['amount', 'id', 'name']
)
result = data.groupby(['id', 'amount'])['name'].nunique()
index = MultiIndex.from_arrays([data.id, data.amount])
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
def test_nunique_with_empty_series():
# GH 12553
data = pd.Series(name='name')
result = data.groupby(level=0).nunique()
expected = pd.Series(name='name', dtype='int64')
tm.assert_series_equal(result, expected)
def test_nunique_with_timegrouper():
# GH 13453
test = pd.DataFrame({
'time': [Timestamp('2016-06-28 09:35:35'),
Timestamp('2016-06-28 16:09:30'),
Timestamp('2016-06-28 16:46:28')],
'data': ['1', '2', '3']}).set_index('time')
result = test.groupby(pd.Grouper(freq='h'))['data'].nunique()
expected = test.groupby(
pd.Grouper(freq='h')
)['data'].apply(pd.Series.nunique)
tm.assert_series_equal(result, expected)
# count
# --------------------------------
def test_groupby_timedelta_cython_count():
df = DataFrame({'g': list('ab' * 2),
'delt': np.arange(4).astype('timedelta64[ns]')})
expected = Series([
2, 2
], index=pd.Index(['a', 'b'], name='g'), name='delt')
result = df.groupby('g').delt.count()
tm.assert_series_equal(expected, result)
def test_count():
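    # Build a mixed-dtype frame, inject NaNs, and check that groupby count() matches
    # applying DataFrame.count per group.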
n = 1 << 15
dr = date_range('2015-08-30', periods=n // 10, freq='T')
df = DataFrame({
'1st': np.random.choice(
list(ascii_lowercase), n),
'2nd': np.random.randint(0, 5, n),
'3rd': np.random.randn(n).round(3),
'4th': np.random.randint(-10, 10, n),
'5th': np.random.choice(dr, n),
'6th': np.random.randn(n).round(3),
'7th': np.random.randn(n).round(3),
'8th': np.random.choice(dr, n) - np.random.choice(dr, 1),
'9th': np.random.choice(
list(ascii_lowercase), n)
})
for col in df.columns.drop(['1st', '2nd', '4th']):
df.loc[np.random.choice(n, n // 10), col] = np.nan
df['9th'] = df['9th'].astype('category')
for key in '1st', '2nd', ['1st', '2nd']:
left = df.groupby(key).count()
right = df.groupby(key).apply(DataFrame.count).drop(key, axis=1)
tm.assert_frame_equal(left, right)
# GH5610
# count counts non-nulls
df = pd.DataFrame([[1, 2, 'foo'],
[1, np.nan, 'bar'],
[3, np.nan, np.nan]],
columns=['A', 'B', 'C'])
count_as = df.groupby('A').count()
count_not_as = df.groupby('A', as_index=False).count()
expected = DataFrame([[1, 2], [0, 0]], columns=['B', 'C'],
index=[1, 3])
expected.index.name = 'A'
tm.assert_frame_equal(count_not_as, expected.reset_index())
tm.assert_frame_equal(count_as, expected)
count_B = df.groupby('A')['B'].count()
tm.assert_series_equal(count_B, expected['B'])
def test_count_object():
df = pd.DataFrame({'a': ['a'] * 3 + ['b'] * 3, 'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
3, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
df = pd.DataFrame({'a': ['a', np.nan, np.nan] + ['b'] * 3,
'c': [2] * 3 + [3] * 3})
result = df.groupby('c').a.count()
expected = pd.Series([
1, 3
], index=pd.Index([2, 3], name='c'), name='a')
tm.assert_series_equal(result, expected)
def test_count_cross_type():
# GH8169
vals = np.hstack((np.random.randint(0, 5, (100, 2)), np.random.randint(
0, 2, (100, 2))))
df = pd.DataFrame(vals, columns=['a', 'b', 'c', 'd'])
df[df == 2] = np.nan
expected = df.groupby(['c', 'd']).count()
for t in ['float32', 'object']:
df['a'] = df['a'].astype(t)
df['b'] = df['b'].astype(t)
result = df.groupby(['c', 'd']).count()
tm.assert_frame_equal(result, expected)
def test_lower_int_prec_count():
df = DataFrame({'a': np.array(
[0, 1, 2, 100], np.int8),
'b': np.array(
[1, 2, 3, 6], np.uint32),
'c': np.array(
[4, 5, 6, 8], np.int16),
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2],
'b': [2, 2],
'c': [2, 2]}, index=pd.Index(list('ab'),
name='grp'))
tm.assert_frame_equal(result, expected)
def test_count_uses_size_on_exception():
class RaisingObjectException(Exception):
pass
class RaisingObject(object):
def __init__(self, msg='I will raise inside Cython'):
super(RaisingObject, self).__init__()
self.msg = msg
def __eq__(self, other):
# gets called in Cython to check that raising calls the method
raise RaisingObjectException(self.msg)
df = DataFrame({'a': [RaisingObject() for _ in range(4)],
'grp': list('ab' * 2)})
result = df.groupby('grp').count()
expected = DataFrame({'a': [2, 2]}, index=pd.Index(
list('ab'), name='grp'))
tm.assert_frame_equal(result, expected)
# size
# --------------------------------
def test_size(df):
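    # size() of each group should equal len(group) for single- and multi-key groupings.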
grouped = df.groupby(['A', 'B'])
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('A')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
grouped = df.groupby('B')
result = grouped.size()
for key, group in grouped:
assert result[key] == len(group)
df = DataFrame(np.random.choice(20, (1000, 3)), columns=list('abc'))
for sort, key in cart_product((False, True), ('a', 'b', ['a', 'b'])):
left = df.groupby(key, sort=sort).size()
right = df.groupby(key, sort=sort)['c'].apply(lambda a: a.shape[0])
tm.assert_series_equal(left, right, check_names=False)
# GH11699
df = DataFrame([], columns=['A', 'B'])
out = Series([], dtype='int64', index=Index([], name='A'))
tm.assert_series_equal(df.groupby('A').size(), out)
# pipe
# --------------------------------
def test_pipe():
# Test the pipe method of DataFrameGroupBy.
# Issue #17871
random_state = np.random.RandomState(1234567890)
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': random_state.randn(8),
'C': random_state.randn(8)})
def f(dfgb):
return dfgb.B.max() - dfgb.C.min().min()
def square(srs):
return srs ** 2
# Note that the transformations are
# GroupBy -> Series
# Series -> Series
# This then chains the GroupBy.pipe and the
# NDFrame.pipe methods
result = df.groupby('A').pipe(f).pipe(square)
index = Index([u'bar', u'foo'], dtype='object', name=u'A')
expected = pd.Series([8.99110003361, 8.17516964785], name='B',
index=index)
tm.assert_series_equal(expected, result)
def test_pipe_args():
# Test passing args to the pipe method of DataFrameGroupBy.
# Issue #17871
df = pd.DataFrame({'group': ['A', 'A', 'B', 'B', 'C'],
'x': [1.0, 2.0, 3.0, 2.0, 5.0],
'y': [10.0, 100.0, 1000.0, -100.0, -1000.0]})
def f(dfgb, arg1):
return (dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False)
.groupby(dfgb.grouper))
def g(dfgb, arg2):
return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
result = (df
.groupby('group')
.pipe(f, 0)
.pipe(g, 10)
.pipe(h, 100))
# Assert the results here
index = pd.Index(['A', 'B', 'C'], name='group')
expected = pd.Series([-79.5160891089, -78.4839108911, -80],
index=index)
tm.assert_series_equal(expected, result)
# test SeriesGroupby.pipe
ser = pd.Series([1, 1, 2, 2, 3, 3])
result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3]))
tm.assert_series_equal(result, expected)
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = pd.DataFrame({
"user": ["A", "A", "A", "A", "A"],
"connections": [4970, 4749, 4719, 4704, 18446744073699999744]
})
assert df.groupby('user')['connections'].mean()['A'] == 3689348814740003840
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
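    # Brute-force reference density estimate used to validate the tree-based
    # KernelDensity results below.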
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
manahl/arctic | tests/integration/tickstore/test_ts_read.py | 1 | 30190 | # -*- coding: utf-8 -*-
from datetime import datetime as dt
import numpy as np
import pandas as pd
import pytest
import six
from mock import patch, call, Mock
from numpy.testing.utils import assert_array_equal
from pandas import DatetimeIndex
from pandas.util.testing import assert_frame_equal
from pymongo import ReadPreference
from arctic._util import mongo_count
from arctic.date import DateRange, mktz, CLOSED_CLOSED, CLOSED_OPEN, OPEN_CLOSED, OPEN_OPEN
from arctic.exceptions import NoDataFoundException
def test_read(tickstore_lib):
data = [{'ASK': 1545.25,
'ASKSIZE': 1002.0,
'BID': 1545.0,
'BIDSIZE': 55.0,
'CUMVOL': 2187387.0,
'DELETED_TIME': 0,
'INSTRTYPE': 'FUT',
'PRICE': 1545.0,
'SIZE': 1.0,
'TICK_STATUS': 0,
'TRADEHIGH': 1561.75,
'TRADELOW': 1537.25,
'index': 1185076787070},
{'CUMVOL': 354.0,
'DELETED_TIME': 0,
'PRICE': 1543.75,
'SIZE': 354.0,
'TRADEHIGH': 1543.75,
'TRADELOW': 1543.75,
'index': 1185141600600}]
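    # Write two ticks with different field sets; fields absent from a tick should
    # read back as NaN.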
tickstore_lib.write('FEED::SYMBOL', data)
df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'])
assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
assert_array_equal(df['BID'].values, np.array([1545, np.nan]))
assert_array_equal(df['PRICE'].values, np.array([1545, 1543.75]))
assert_array_equal(df.index.values.astype('object'), np.array([1185076787070000000, 1185141600600000000]))
assert tickstore_lib._collection.find_one()['c'] == 2
assert df.index.tzinfo == mktz()
def test_read_data_is_modifiable(tickstore_lib):
data = [{'ASK': 1545.25,
'ASKSIZE': 1002.0,
'BID': 1545.0,
'BIDSIZE': 55.0,
'CUMVOL': 2187387.0,
'DELETED_TIME': 0,
'INSTRTYPE': 'FUT',
'PRICE': 1545.0,
'SIZE': 1.0,
'TICK_STATUS': 0,
'TRADEHIGH': 1561.75,
'TRADELOW': 1537.25,
'index': 1185076787070},
{'CUMVOL': 354.0,
'DELETED_TIME': 0,
'PRICE': 1543.75,
'SIZE': 354.0,
'TRADEHIGH': 1543.75,
'TRADELOW': 1543.75,
'index': 1185141600600}]
tickstore_lib.write('FEED::SYMBOL', data)
df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'])
df[['BID', 'ASK', 'PRICE']] = 7
assert np.all(df[['BID', 'ASK', 'PRICE']].values == np.array([[7, 7, 7], [7, 7, 7]]))
def test_read_allow_secondary(tickstore_lib):
data = [{'ASK': 1545.25,
'ASKSIZE': 1002.0,
'BID': 1545.0,
'BIDSIZE': 55.0,
'CUMVOL': 2187387.0,
'DELETED_TIME': 0,
'INSTRTYPE': 'FUT',
'PRICE': 1545.0,
'SIZE': 1.0,
'TICK_STATUS': 0,
'TRADEHIGH': 1561.75,
'TRADELOW': 1537.25,
'index': 1185076787070},
{'CUMVOL': 354.0,
'DELETED_TIME': 0,
'PRICE': 1543.75,
'SIZE': 354.0,
'TRADEHIGH': 1543.75,
'TRADELOW': 1543.75,
'index': 1185141600600}]
tickstore_lib.write('FEED::SYMBOL', data)
with patch('pymongo.collection.Collection.find', side_effect=tickstore_lib._collection.find) as find:
with patch('pymongo.collection.Collection.with_options', side_effect=tickstore_lib._collection.with_options) as with_options:
with patch.object(tickstore_lib, '_read_preference', side_effect=tickstore_lib._read_preference) as read_pref:
df = tickstore_lib.read('FEED::SYMBOL', columns=['BID', 'ASK', 'PRICE'], allow_secondary=True)
assert read_pref.call_args_list == [call(True)]
assert with_options.call_args_list == [call(read_preference=ReadPreference.NEAREST)]
assert find.call_args_list == [call({'sy': 'FEED::SYMBOL'}, sort=[('s', 1)], projection={'s': 1, '_id': 0}),
call({'sy': 'FEED::SYMBOL', 's': {'$lte': dt(2007, 8, 21, 3, 59, 47, 70000)}},
projection={'sy': 1, 'cs.PRICE': 1, 'i': 1, 'cs.BID': 1, 's': 1, 'im': 1, 'v': 1, 'cs.ASK': 1})]
assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
assert tickstore_lib._collection.find_one()['c'] == 2
def test_read_symbol_as_column(tickstore_lib):
data = [{'ASK': 1545.25,
'index': 1185076787070},
{'CUMVOL': 354.0,
'index': 1185141600600}]
tickstore_lib.write('FEED::SYMBOL', data)
df = tickstore_lib.read('FEED::SYMBOL', columns=['SYMBOL', 'CUMVOL'])
assert all(df['SYMBOL'].values == ['FEED::SYMBOL'])
def test_read_multiple_symbols(tickstore_lib):
data1 = [{'ASK': 1545.25,
'ASKSIZE': 1002.0,
'BID': 1545.0,
'BIDSIZE': 55.0,
'CUMVOL': 2187387.0,
'DELETED_TIME': 0,
'INSTRTYPE': 'FUT',
'PRICE': 1545.0,
'SIZE': 1.0,
'TICK_STATUS': 0,
'TRADEHIGH': 1561.75,
'TRADELOW': 1537.25,
'index': 1185076787070}, ]
data2 = [{'CUMVOL': 354.0,
'DELETED_TIME': 0,
'PRICE': 1543.75,
'SIZE': 354.0,
'TRADEHIGH': 1543.75,
'TRADELOW': 1543.75,
'index': 1185141600600}]
tickstore_lib.write('BAR', data2)
tickstore_lib.write('FOO', data1)
df = tickstore_lib.read(['FOO', 'BAR'], columns=['BID', 'ASK', 'PRICE'])
assert all(df['SYMBOL'].values == ['FOO', 'BAR'])
assert_array_equal(df['ASK'].values, np.array([1545.25, np.nan]))
assert_array_equal(df['BID'].values, np.array([1545, np.nan]))
assert_array_equal(df['PRICE'].values, np.array([1545, 1543.75]))
assert_array_equal(df.index.values.astype('object'), np.array([1185076787070000000, 1185141600600000000]))
assert tickstore_lib._collection.find_one()['c'] == 1
@pytest.mark.parametrize('chunk_size', [1, 100])
def test_read_all_cols_all_dtypes(tickstore_lib, chunk_size):
data = [{'f': 0.1,
'of': 0.2,
's': 's',
'os': 'os',
'l': 1,
'ol': 2,
'index': dt(1970, 1, 1, tzinfo=mktz('UTC')),
},
{'f': 0.3,
'nf': 0.4,
's': 't',
'ns': 'ns',
'l': 3,
'nl': 4,
'index': dt(1970, 1, 1, 0, 0, 1, tzinfo=mktz('UTC')),
},
]
tickstore_lib._chunk_size = chunk_size
tickstore_lib.write('sym', data)
df = tickstore_lib.read('sym', columns=None)
assert df.index.tzinfo == mktz()
# The below is probably more trouble than it's worth, but we *should*
# be able to roundtrip data and get the same answer...
# Ints become floats
data[0]['l'] = float(data[0]['l'])
# Treat missing strings as None
data[0]['ns'] = None
data[1]['os'] = None
index = DatetimeIndex([dt(1970, 1, 1, tzinfo=mktz('UTC')),
dt(1970, 1, 1, 0, 0, 1, tzinfo=mktz('UTC'))],
)
df.index = df.index.tz_convert(mktz('UTC'))
expected = pd.DataFrame(data, index=index)
expected = expected[df.columns]
assert_frame_equal(expected, df, check_names=False)
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'b': 3.,
'c': 4.,
'index': dt(2013, 1, 2, tzinfo=mktz('Europe/London'))
},
{'b': 5.,
'c': 6.,
'index': dt(2013, 1, 3, tzinfo=mktz('Europe/London'))
},
{'b': 7.,
'c': 8.,
'index': dt(2013, 1, 4, tzinfo=mktz('Europe/London'))
},
{'b': 9.,
'c': 10.,
'index': dt(2013, 1, 5, tzinfo=mktz('Europe/London'))
},
]
def test_date_range(tickstore_lib):
tickstore_lib.write('SYM', DUMMY_DATA)
df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130103), columns=None)
assert_array_equal(df['a'].values, np.array([1, np.nan, np.nan]))
assert_array_equal(df['b'].values, np.array([2., 3., 5.]))
assert_array_equal(df['c'].values, np.array([np.nan, 4., 6.]))
tickstore_lib.delete('SYM')
    # Chunk every 3 ticks and let's have some fun
tickstore_lib._chunk_size = 3
tickstore_lib.write('SYM', DUMMY_DATA)
with patch('pymongo.collection.Collection.find', side_effect=tickstore_lib._collection.find) as f:
df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130103), columns=None)
assert_array_equal(df['b'].values, np.array([2., 3., 5.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130103), columns=None)
assert_array_equal(df['b'].values, np.array([3., 5.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130103), columns=None)
assert_array_equal(df['b'].values, np.array([5.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130104), columns=None)
assert_array_equal(df['b'].values, np.array([3., 5., 7.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
df = tickstore_lib.read('SYM', date_range=DateRange(20130102, 20130105), columns=None)
assert_array_equal(df['b'].values, np.array([3., 5., 7., 9.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130104), columns=None)
assert_array_equal(df['b'].values, np.array([5., 7.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
df = tickstore_lib.read('SYM', date_range=DateRange(20130103, 20130105), columns=None)
assert_array_equal(df['b'].values, np.array([5., 7., 9.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 2
df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105), columns=None)
assert_array_equal(df['b'].values, np.array([7., 9.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
# Test the different open-closed behaviours
df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, CLOSED_CLOSED), columns=None)
assert_array_equal(df['b'].values, np.array([7., 9.]))
df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, CLOSED_OPEN), columns=None)
assert_array_equal(df['b'].values, np.array([7.]))
df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, OPEN_CLOSED), columns=None)
assert_array_equal(df['b'].values, np.array([9.]))
df = tickstore_lib.read('SYM', date_range=DateRange(20130104, 20130105, OPEN_OPEN), columns=None)
assert_array_equal(df['b'].values, np.array([]))
def test_date_range_end_not_in_range(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'b': 3.,
'c': 4.,
'index': dt(2013, 1, 2, 10, 1, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
with patch.object(tickstore_lib._collection, 'find', side_effect=tickstore_lib._collection.find) as f:
df = tickstore_lib.read('SYM', date_range=DateRange(20130101, dt(2013, 1, 2, 9, 0)), columns=None)
assert_array_equal(df['b'].values, np.array([2.]))
assert mongo_count(tickstore_lib._collection, filter=f.call_args_list[-1][0][0]) == 1
@pytest.mark.parametrize('tz_name', ['UTC',
'Europe/London', # Sometimes ahead of UTC
'America/New_York', # Behind UTC
])
def test_date_range_default_timezone(tickstore_lib, tz_name):
"""
We assume naive datetimes are user-local
"""
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz(tz_name))
},
# Half-way through the year
{'b': 3.,
'c': 4.,
'index': dt(2013, 7, 1, tzinfo=mktz(tz_name))
},
]
with patch('tzlocal.get_localzone', return_value=Mock(zone=tz_name)):
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130701), columns=None)
assert df.index.tzinfo == mktz()
assert len(df) == 2
assert df.index[1] == dt(2013, 7, 1, tzinfo=mktz(tz_name))
df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130101), columns=None)
assert len(df) == 1
assert df.index.tzinfo == mktz()
df = tickstore_lib.read('SYM', date_range=DateRange(20130701, 20130701), columns=None)
assert len(df) == 1
assert df.index.tzinfo == mktz()
def test_date_range_no_bounds(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 1, 30, tzinfo=mktz('Europe/London'))
},
{'b': 5.,
'c': 6.,
'index': dt(2013, 2, 2, 10, 1, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
# 1) No start, no end
df = tickstore_lib.read('SYM', columns=None)
assert_array_equal(df['b'].values, np.array([2., 4.]))
# 1.2) Start before the real start
df = tickstore_lib.read('SYM', date_range=DateRange(20121231), columns=None)
assert_array_equal(df['b'].values, np.array([2., 4.]))
# 2.1) Only go one month out
df = tickstore_lib.read('SYM', date_range=DateRange(20130101), columns=None)
assert_array_equal(df['b'].values, np.array([2., 4.]))
# 2.2) Only go one month out
df = tickstore_lib.read('SYM', date_range=DateRange(20130102), columns=None)
assert_array_equal(df['b'].values, np.array([4.]))
# 3) No start
df = tickstore_lib.read('SYM', date_range=DateRange(end=20130102), columns=None)
assert_array_equal(df['b'].values, np.array([2.]))
# 4) Outside bounds
df = tickstore_lib.read('SYM', date_range=DateRange(end=20131212), columns=None)
assert_array_equal(df['b'].values, np.array([2., 4., 5.]))
def test_date_range_BST(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
df = tickstore_lib.read('SYM', columns=None)
assert_array_equal(df['b'].values, np.array([2., 4.]))
# df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12),
# dt(2013, 6, 1, 13)))
# assert_array_equal(df['b'].values, np.array([2., 4.]))
df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, tzinfo=mktz('Europe/London')),
dt(2013, 6, 1, 13, tzinfo=mktz('Europe/London'))))
assert_array_equal(df['b'].values, np.array([2., 4.]))
df = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, tzinfo=mktz('UTC')),
dt(2013, 6, 1, 13, tzinfo=mktz('UTC'))))
assert_array_equal(df['b'].values, np.array([4., ]))
def test_read_no_data(tickstore_lib):
with pytest.raises(NoDataFoundException):
tickstore_lib.read('missing_sym', DateRange(20131212, 20131212))
def test_write_no_tz(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 6, 1, 12, 00)
}]
with pytest.raises(ValueError):
tickstore_lib.write('SYM', DUMMY_DATA)
def test_read_out_of_order(tickstore_lib):
data = [{'A': 120, 'D': 1}, {'A': 122, 'B': 2.0}, {'A': 3, 'B': 3.0, 'D': 1}]
tick_index = [dt(2013, 6, 1, 12, 00, tzinfo=mktz('UTC')),
dt(2013, 6, 1, 11, 00, tzinfo=mktz('UTC')), # Out-of-order
dt(2013, 6, 1, 13, 00, tzinfo=mktz('UTC'))]
data = pd.DataFrame(data, index=tick_index)
tickstore_lib._chunk_size = 3
tickstore_lib.write('SYM', data)
tickstore_lib.read('SYM', columns=None)
assert len(tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, tzinfo=mktz('UTC')), dt(2013, 6, 2, tzinfo=mktz('UTC'))))) == 3
assert len(tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1, tzinfo=mktz('UTC')), dt(2013, 6, 1, 12, tzinfo=mktz('UTC'))))) == 2
def test_read_chunk_boundaries(tickstore_lib):
SYM1_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('UTC'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('UTC'))
},
# Chunk boundary here
{'a': 5.,
'b': 6.,
'index': dt(2013, 6, 1, 14, 00, tzinfo=mktz('UTC'))
}
]
SYM2_DATA = [
{'a': 7.,
'b': 8.,
'index': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC'))
},
{'a': 9.,
'b': 10.,
'index': dt(2013, 6, 1, 13, 30, tzinfo=mktz('UTC'))
},
# Chunk boundary here
{'a': 11.,
'b': 12.,
'index': dt(2013, 6, 1, 14, 30, tzinfo=mktz('UTC'))
}
]
tickstore_lib._chunk_size = 2
tickstore_lib.write('SYM1', SYM1_DATA)
tickstore_lib.write('SYM2', SYM2_DATA)
assert len(tickstore_lib.read('SYM1', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')), dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC'))))) == 2
assert len(tickstore_lib.read('SYM2', columns=None, date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')), dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC'))))) == 2
assert len(tickstore_lib.read(['SYM1', 'SYM2'], columns=None, date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')), dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC'))))) == 4
def test_read_spanning_chunks(tickstore_lib):
SYM1_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 6, 1, 11, 00, tzinfo=mktz('UTC'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('UTC'))
},
# Chunk boundary here
{'a': 5.,
'b': 6.,
'index': dt(2013, 6, 1, 14, 00, tzinfo=mktz('UTC'))
}
]
SYM2_DATA = [
{'a': 7.,
'b': 8.,
'index': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC'))
},
{'a': 9.,
'b': 10.,
'index': dt(2013, 6, 1, 13, 30, tzinfo=mktz('UTC'))
},
# Chunk boundary here
{'a': 11.,
'b': 12.,
'index': dt(2013, 6, 1, 14, 30, tzinfo=mktz('UTC'))
}
]
tickstore_lib._chunk_size = 2
tickstore_lib.write('SYM1', SYM1_DATA)
tickstore_lib.write('SYM2', SYM2_DATA)
# Even though the latest chunk that's the closest to the start point for SYM1 starts at 11:00, it ends before the start point,
# so we want to ignore it and start from SYM2 (12:30) instead.
assert tickstore_lib._mongo_date_range_query(
['SYM1', 'SYM2'],
date_range=DateRange(dt(2013, 6, 1, 12, 45, tzinfo=mktz('UTC')),
dt(2013, 6, 1, 15, 00, tzinfo=mktz('UTC')))) == \
{'s': {'$gte': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC')), '$lte': dt(2013, 6, 1, 15, 0, tzinfo=mktz('UTC'))}}
def test_read_inside_range(tickstore_lib):
SYM1_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 6, 1, 0, 00, tzinfo=mktz('UTC'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 6, 1, 1, 00, tzinfo=mktz('UTC'))
},
# Chunk boundary here
{'a': 5.,
'b': 6.,
'index': dt(2013, 6, 1, 14, 00, tzinfo=mktz('UTC'))
}
]
SYM2_DATA = [
{'a': 7.,
'b': 8.,
'index': dt(2013, 6, 1, 12, 30, tzinfo=mktz('UTC'))
},
{'a': 9.,
'b': 10.,
'index': dt(2013, 6, 1, 13, 30, tzinfo=mktz('UTC'))
},
# Chunk boundary here
{'a': 11.,
'b': 12.,
'index': dt(2013, 6, 1, 14, 30, tzinfo=mktz('UTC'))
}
]
tickstore_lib._chunk_size = 2
tickstore_lib.write('SYM1', SYM1_DATA)
tickstore_lib.write('SYM2', SYM2_DATA)
# If there are no chunks spanning the range, we still cap the start range so that we don't
# fetch SYM1's 0am--1am chunk
assert tickstore_lib._mongo_date_range_query(
['SYM1', 'SYM2'],
date_range=DateRange(dt(2013, 6, 1, 10, 0, tzinfo=mktz('UTC')),
dt(2013, 6, 1, 15, 0, tzinfo=mktz('UTC')))) == \
{'s': {'$gte': dt(2013, 6, 1, 10, 0, tzinfo=mktz('UTC')), '$lte': dt(2013, 6, 1, 15, 0, tzinfo=mktz('UTC'))}}
def test_read_longs(tickstore_lib):
DUMMY_DATA = [
{'a': 1,
'index': dt(2013, 6, 1, 12, 00, tzinfo=mktz('Europe/London'))
},
{
'b': 4,
'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 3
tickstore_lib.write('SYM', DUMMY_DATA)
tickstore_lib.read('SYM', columns=None)
read = tickstore_lib.read('SYM', columns=None, date_range=DateRange(dt(2013, 6, 1), dt(2013, 6, 2)))
assert read['a'][0] == 1
assert np.isnan(read['b'][0])
def test_read_with_image(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'index': dt(2013, 1, 1, 11, 00, tzinfo=mktz('Europe/London'))
},
{
'b': 4.,
'index': dt(2013, 1, 1, 12, 00, tzinfo=mktz('Europe/London'))
},
]
# Add an image
tickstore_lib.write('SYM', DUMMY_DATA)
tickstore_lib._collection.update_one({},
{'$set':
{'im': {'i':
{'a': 37.,
'c': 2.,
},
't': dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
}
}
}
)
dr = DateRange(dt(2013, 1, 1), dt(2013, 1, 2))
# tickstore_lib.read('SYM', columns=None)
df = tickstore_lib.read('SYM', columns=None, date_range=dr)
assert df['a'][0] == 1
# Read with the image as well - all columns
df = tickstore_lib.read('SYM', columns=None, date_range=dr, include_images=True)
assert set(df.columns) == set(('a', 'b', 'c'))
assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
assert_array_equal(df['b'].values, np.array([np.nan, np.nan, 4]))
assert_array_equal(df['c'].values, np.array([2, np.nan, np.nan]))
assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))
# Read just columns from the updates
df = tickstore_lib.read('SYM', columns=('a', 'b'), date_range=dr, include_images=True)
assert set(df.columns) == set(('a', 'b'))
assert_array_equal(df['a'].values, np.array([37, 1, np.nan]))
assert_array_equal(df['b'].values, np.array([np.nan, np.nan, 4]))
assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
assert df.index[2] == dt(2013, 1, 1, 12, tzinfo=mktz('Europe/London'))
# Read one column from the updates
df = tickstore_lib.read('SYM', columns=('a',), date_range=dr, include_images=True)
assert set(df.columns) == set(('a',))
assert_array_equal(df['a'].values, np.array([37, 1]))
assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
assert df.index[1] == dt(2013, 1, 1, 11, tzinfo=mktz('Europe/London'))
# Read just the image column
df = tickstore_lib.read('SYM', columns=['c'], date_range=dr, include_images=True)
assert set(df.columns) == set(['c'])
assert_array_equal(df['c'].values, np.array([2]))
assert df.index[0] == dt(2013, 1, 1, 10, tzinfo=mktz('Europe/London'))
def test_read_with_metadata(tickstore_lib):
metadata = {'metadata': 'important data'}
tickstore_lib.write('test', [{'index': dt(2013, 6, 1, 13, 00, tzinfo=mktz('Europe/London')), 'price': 100.50, 'ticker': 'QQQ'}], metadata=metadata)
m = tickstore_lib.read_metadata('test')
assert(metadata == m)
def test_read_strings(tickstore_lib):
df = pd.DataFrame(data={'data': ['A', 'B', 'C']},
index=pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 2, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 3, 00, tzinfo=mktz('UTC'))], name='date'))
tickstore_lib.write('test', df)
read_df = tickstore_lib.read('test')
assert(all(read_df['data'].values == df['data'].values))
def test_read_utf8_strings(tickstore_lib):
    data = ['一', '二', '三'] # Chinese characters [one, two, three]
if six.PY2:
utf8_data = data
unicode_data = [s.decode('utf8') for s in data]
else:
utf8_data = [s.encode('utf8') for s in data]
unicode_data = data
df = pd.DataFrame(data={'data': utf8_data},
index=pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 2, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 3, 00, tzinfo=mktz('UTC'))], name='date'))
tickstore_lib.write('test', df)
read_df = tickstore_lib.read('test')
assert(all(read_df['data'].values == np.array(unicode_data)))
def test_read_unicode_strings(tickstore_lib):
    df = pd.DataFrame(data={'data': [u'一', u'二', u'三']}, # Chinese characters [one, two, three]
index=pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 2, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 3, 00, tzinfo=mktz('UTC'))], name='date'))
tickstore_lib.write('test', df)
read_df = tickstore_lib.read('test')
assert(all(read_df['data'].values == df['data'].values))
def test_objects_fail(tickstore_lib):
class Fake(object):
def __init__(self, val):
self.val = val
def fake(self):
return self.val
df = pd.DataFrame(data={'data': [Fake(1), Fake(2)]},
index=pd.Index(data=[dt(2016, 1, 1, 00, tzinfo=mktz('UTC')),
dt(2016, 1, 2, 00, tzinfo=mktz('UTC'))], name='date'))
with pytest.raises(Exception) as e:
tickstore_lib.write('test', df)
assert('Casting object column to string failed' in str(e.value))
| lgpl-2.1 |
q1ang/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
    lambda_ : float
        estimated precision of the weights.
    scores_ : list of float
        if computed, value of the objective function at each iteration
        (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
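            # Two code paths give the same result and differ only in cost:
            # when n_samples > n_features the solve is carried out in feature
            # space via the right singular vectors Vh (an n_features-sized
            # problem), otherwise in sample space via U (n_samples-sized).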
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
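            # gamma_ is the effective number of well-determined parameters;
            # the re-estimates below are the usual evidence-maximization
            # (MacKay-style) updates of the noise precision alpha_ and weight
            # precision lambda_, with the Gamma hyperprior terms included.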
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
    scores_ : list of float
        if computed, value of the objective function at each iteration
        (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values (real numbers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
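            # The Woodbury identity lets us work in the (usually smaller)
            # sample space: the posterior covariance over the kept features,
            # sigma_ = (alpha_ * X'X + diag(lambda_))^-1, is rebuilt from
            # (I / alpha_ + X diag(1 / lambda_) X')^-1, so only an
            # (n_samples x n_samples) matrix is ever inverted.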
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
glennlive/gnuradio-wg-grc | gr-filter/examples/fir_filter_ccc.py | 47 | 4019 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
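        # firdes.low_pass_2(gain, sample_rate, cutoff_freq, transition_width,
        # stopband_attenuation_dB) designs the low-pass taps used below.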
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
ml-lab/pylearn2 | pylearn2/testing/skip.py | 49 | 1363 | """
Helper functions for determining which tests to skip.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from nose.plugins.skip import SkipTest
import os
from theano.sandbox import cuda
scipy_works = True
try:
import scipy
except ImportError:
# pyflakes gets mad if you set scipy to None here
scipy_works = False
sklearn_works = True
try:
import sklearn
except ImportError:
sklearn_works = False
h5py_works = True
try:
import h5py
except ImportError:
h5py_works = False
matplotlib_works = True
try:
from matplotlib import pyplot
except ImportError:
matplotlib_works = False
def skip_if_no_data():
if 'PYLEARN2_DATA_PATH' not in os.environ:
raise SkipTest()
def skip_if_no_scipy():
if not scipy_works:
raise SkipTest()
def skip_if_no_sklearn():
if not sklearn_works:
raise SkipTest()
def skip_if_no_gpu():
    if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled.')
def skip_if_no_h5py():
if not h5py_works:
raise SkipTest()
def skip_if_no_matplotlib():
if not matplotlib_works:
raise SkipTest("matplotlib and pyplot are not available")
| bsd-3-clause |
charlesll/RamPy | legacy_code/IR_dec_comb.py | 1 | 6585 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 07:54:05 2014
@author: charleslelosq
Carnegie Institution for Science
"""
import sys
sys.path.append("/Users/charleslelosq/Documents/RamPy/lib-charles/")
import os
import csv
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from StringIO import StringIO
from scipy import interpolate
# to fit spectra we use the lmfit software of Matt Newville, CARS, university of Chicago, available on the web
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, fit_report
from spectratools import * #Charles' libraries and functions
from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename
#### We define the set of functions that will be used for fitting the data.
#### Unfortunately, because we use lmfit (convenient since individual parameters
#### can be fixed or released easily), we cannot pass arrays of parameters,
#### so writing everything out is a little long but quite robust.
#### gaussian and pseudovoigt functions are available in spectratools
#### if you need a voigt, fix the gaussian-to-lorentzian ratio to 1 in the parameter definition before
#### doing the data fit
def residual(pars, x, data=None, eps=None):
# unpack parameters:
# extract .value attribute for each parameter
a1 = pars['a1'].value
a2 = pars['a2'].value
f1 = pars['f1'].value
f2 = pars['f2'].value
l1 = pars['l1'].value
l2 = pars['l2'].value
# Gaussian model
peak1 = gaussian(x,a1,f1,l1)
peak2 = gaussian(x,a2,f2,l2)
model = peak1 + peak2
if data is None:
return model, peak1, peak2
if eps is None:
return (model - data)
return (model - data)/eps
##### CORE OF THE CALCULATION BELOW
#### CALLING THE DATA NAMES
tkMessageBox.showinfo(
"Open file",
"Please open the list of spectra")
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
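# 'pathbeg' (assumed here to be the folder that contains the spectra list) is
# used later to build the ./deconv/ output paths; adjust it if your layout differs.
pathbeg = os.path.dirname(filename)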
with open(filename) as inputfile:
results = list(csv.reader(inputfile)) # we read the data list
#### LOOP FOR BEING ABLE TO TREAT MULTIPLE DATA
#### WARNING: OUTPUT ARE AUTOMATICALLY GENERATED IN A DIRECTORY CALLED "DECONV"
#### (see end) THAT SHOULD BE PRESENT !!!!!!!!!!
for lg in range(len(results)):
name = str(results[lg]).strip('[]')
name = name[1:-1] # to remove unwanted ""
sample = np.genfromtxt(name) # get the sample to deconvolute
    # here we set the lower and upper bounds of the region of interest
    lb = 4700 ### MAY NEED TO ADJUST THAT
hb = 6000
interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
ese0 = interestspectra[:,2]/abs(interestspectra[:,1]) #take ese as a percentage, we assume that the treatment was made correctly for error determination... if not, please put sigma = None
interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100 # normalise spectra to maximum, easier to handle after
sigma = abs(ese0*interestspectra[:,1]) #calculate good ese
#sigma = None # you can activate that if you are not sure about the errors
xfit = interestspectra[:,0] # region to be fitted
data = interestspectra[:,1] # region to be fitted
params = Parameters()
####################### FOR MELT:
####################### COMMENT IF NOT WANTED
# (Name, Value, Vary, Min, Max, Expr)
params.add_many(('a1', 1, True, 0, None, None),
('f1', 5200, True, 750, None, None),
('l1', 1, True, 0, None, None),
('a2', 1, True, 0, None, None),
('f2', 5400, True, None, None, None),
('l2', 1, True, None, None, None))
    result = minimize(residual, params, args=(xfit, data)) # fit data with leastsq model from scipy
model = fit_report(params) # the report
    yout, peak1, peak2 = residual(params, xfit) # the different peaks
    #### We just calculate the area below 4700 cm-1 and those of the Gaussians
# Select interest areas for calculating the areas of OH and H2Omol peaks
intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
esearea4500 = 1/sqrt(area4500) # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
# now for the gaussians
# unpack parameters:
# extract .value attribute for each parameter
    a1 = params['a1'].value
    a2 = params['a2'].value
    l1 = params['l1'].value
    l2 = params['l2'].value
AireG1 = gaussianarea(a1,l1)
AireG2 = gaussianarea(a2,l2)
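    # The OH/H2Omol ratio shown on the figure is not computed anywhere in this
    # legacy script; the reconstruction below is an assumption, taking the
    # ~4500 cm-1 integrated area as OH and the ~5200 cm-1 Gaussian (peak 1) as
    # H2Omol, and propagating only the relative error of the 4500 cm-1 area.
    ratioOH_H2O = area4500 / AireG1
    eseratioOH_H2O = ratioOH_H2O * esearea4500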
##### WE DO A NICE FIGURE THAT CAN BE IMPROVED FOR PUBLICATION
fig = figure()
plot(sample[:,0],sample[:,1],'k-')
plot(xfit,yout,'r-')
plot(xfit,peak1,'b-')
plot(xfit,peak2,'b-')
xlim(lb,hb)
ylim(0,np.max(sample[:,1]))
xlabel("Wavenumber, cm$^{-1}$", fontsize = 18, fontweight = "bold")
ylabel("Absorption, a. u.", fontsize = 18, fontweight = "bold")
text(4000,np.max(intarea45[:,1])+0.03*np.max(intarea45[:,1]),('Area OH: \n'+'%.1f' % area4500),color='b',fontsize = 16)
    text(4650,a1 + 0.05*a1,('Area pic 1: \n'+ '%.1f' % AireG1),color='b',fontsize = 16)
text(5000,a2 + 0.05*a2,('OH/H$_2$O$_{mol}$: \n'+'%.3f' % ratioOH_H2O+'\n+/-'+'%.3f' % eseratioOH_H2O),color='r',fontsize = 16)
##### output of data, fitted peaks, parameters, and the figure in pdf
##### all goes into the ./deconv/ folder
name.rfind('/')
nameout = name[name.rfind('/')+1::]
namesample = nameout[0:nameout.find('.')]
pathint = str('/deconv/') # the output folder
ext1 = '_ydec.txt'
ext2 = '_params.txt'
ext3 = '.pdf'
pathout1 = pathbeg+pathint+namesample+ext1
pathout2 = pathbeg+pathint+namesample+ext2
pathout3 = pathbeg+pathint+namesample+ext3
matout = np.vstack((xfit,data,yout,peak1,peak2))
matout = np.transpose(matout)
np.savetxt(pathout1,matout) # saving the arrays of spectra
fd = os.open( pathout2, os.O_RDWR|os.O_CREAT ) # Open a file and create it if it do not exist
fo = os.fdopen(fd, "w+") # Now get a file object for the above file.
fo.write(model) # write the parameters in it
fo.close()
savefig(pathout3) # save the figure
| gpl-2.0 |
vybstat/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
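# For class c the one-vs-all decision boundary is
# coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0; solving for x1 gives
# the dashed line drawn by plot_hyperplane below.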
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/pylab_examples/multi_image.py | 12 | 2201 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, axes, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data =((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
class ImageFollower:
'update image in response to changes in clim or cmap on another image'
def __init__(self, follower):
self.follower = follower
def __call__(self, leader):
self.follower.set_cmap(leader.get_cmap())
self.follower.set_clim(leader.get_clim())
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].callbacksSM.connect('changed', ImageFollower(im))
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this interactively and
# modify the colormap:
axes(ax[0]) # Return the current axes to the first one,
sci(images[0]) # because the current image must be in current axes.
show()
| apache-2.0 |
yask123/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
github4ry/pathomx | pathomx/kernel_helpers.py | 2 | 3634 | import os
import sys
import numpy as np
import pandas as pd
import re
import io
from matplotlib.figure import Figure, AxesStack
from matplotlib.axes import Subplot
from mplstyler import StylesManager
import warnings
from . import displayobjects
from .utils import scriptdir, basedir
from IPython.core import display
from copy import deepcopy
MAGIC_TYPES = [
# Numpy
np.array, np.ndarray,
# Pandas
pd.Series, pd.DataFrame,
Figure, Subplot,
StylesManager,
# View types
displayobjects.Svg, displayobjects.Html, displayobjects.Markdown,
display.SVG
]
class PathomxTool(object):
''' Simple wrapper class that holds the output data for a given tool; This is for user-friendliness
not for use '''
def __str__(self):
return self._name
def __repr__(self):
return self._name
def __init__(self, name, *args, **kwargs):
self.__dict__.update(kwargs)
self._name = name
def pathomx_notebook_start(vars):
#for k, v in varsi.items():
# vars[k] = v
# _keep_input_vars = ['styles']
# vars['_pathomx_exclude_input_vars'] = [x for x in varsi.keys() if x not in _keep_input_vars]
# Handle IO magic
if '_io' in vars:
for k, v in vars['_io']['input'].items():
if v in vars:
vars[k] = deepcopy(vars[v])
else:
vars[k] = None
if '_rcParams' in vars:
global rcParams
from matplotlib import rcParams
# Block warnings from deprecated rcParams here
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for k, v in vars['_rcParams'].items():
rcParams[k] = v
# Legacy shim
if '_styles' in vars:
vars['styles'] = vars['_styles']
def pathomx_notebook_stop(vars):
varso = {}
if '_io' in vars:
# Handle IO magic
for k, v in vars['_io']['output'].items():
if k in vars:
vars[v] = vars[k]
else:
vars[v] = None
for k, v in vars.items():
# Check it's an accepted type for passing; and not private (starts with _)
if not k.startswith('_') and \
not k in vars['_io']['input'].keys():
if type(v) in MAGIC_TYPES or k in vars['_pathomx_expected_output_vars']:
varso[k] = v
elif hasattr(v, '_repr_html_'):
try:
# Check if it is a bound method (not a class definition)
v._repr_html_()
except:
pass
else:
varso[k] = displayobjects.Html(v)
vars['varso'] = varso
def progress(progress):
    ''' Output the current progress to stdout on the remote core;
        it will be read from stdout and displayed in the UI '''
print("____pathomx_execute_progress_%.2f____" % progress)
class open_with_progress(io.IOBase):
def __init__(self, f, *args, **kwargs):
super(open_with_progress, self).__init__(f, *args, **kwargs)
self._fsize = os.path.getsize(f)
self._progress = None
def read(self, *args, **kwargs):
        # capture and return the data read (the original version discarded it)
        data = super(open_with_progress, self).read(*args, **kwargs)
        self.check_and_emit_progress()
        return data
def check_and_emit_progress(self):
# We only output at 2dp so only emit when that changes
prg = round(self.tell() / self._fsize, 2)
if prg != self._progress:
self._progress = prg
progress(prg)
| gpl-3.0 |
ankurankan/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 42 | 7098 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals import six
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
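# bicluster_ncut scores a bicluster by its normalized cut: the total weight of
# entries linking the bicluster's rows/columns to everything outside it,
# divided by the weight inside the bicluster, so lower is tighter.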
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(six.iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in xrange(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
gautam1168/tardis | tardis/io/model_reader.py | 5 | 7787 | #reading different model files
import numpy as np
from numpy import recfromtxt, genfromtxt
import pandas as pd
from astropy import units as u
import logging
# Adding logging support
logger = logging.getLogger(__name__)
from tardis.util import parse_quantity
class ConfigurationError(Exception):
pass
def read_density_file(density_filename, density_filetype, time_explosion, v_inner_boundary=0.0, v_outer_boundary=np.inf):
"""
read different density file formats
Parameters
----------
density_filename: ~str
filename or path of the density file
density_filetype: ~str
type of the density file
time_explosion: ~astropy.units.Quantity
time since explosion used to scale the density
"""
file_parsers = {'artis': read_artis_density,
'simple_ascii': read_simple_ascii_density}
time_of_model, index, v_inner, v_outer, unscaled_mean_densities = file_parsers[density_filetype](density_filename)
mean_densities = calculate_density_after_time(unscaled_mean_densities, time_of_model, time_explosion)
if v_inner_boundary > v_outer_boundary:
raise ConfigurationError('v_inner_boundary > v_outer_boundary '
'({0:s} > {1:s}). unphysical!'.format(
v_inner_boundary, v_outer_boundary))
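    # Clip the read-in model to the requested velocity range: searchsorted
    # finds the shells that contain the requested boundaries, and the boundary
    # velocities themselves are overwritten below so that the first and last
    # shell edges match exactly what was asked for.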
if (not np.isclose(v_inner_boundary, 0.0 * u.km / u.s,
atol=1e-8 * u.km / u.s)
and v_inner_boundary > v_inner[0]):
if v_inner_boundary > v_outer[-1]:
raise ConfigurationError('Inner boundary selected outside of model')
inner_boundary_index = v_inner.searchsorted(v_inner_boundary) - 1
else:
inner_boundary_index = None
v_inner_boundary = v_inner[0]
logger.warning("v_inner_boundary requested too small for readin file."
" Boundary shifted to match file.")
if not np.isinf(v_outer_boundary) and v_outer_boundary < v_outer[-1]:
outer_boundary_index = v_outer.searchsorted(v_outer_boundary) + 1
else:
outer_boundary_index = None
v_outer_boundary = v_outer[-1]
logger.warning("v_outer_boundary requested too large for readin file. Boundary shifted to match file.")
v_inner = v_inner[inner_boundary_index:outer_boundary_index]
v_inner[0] = v_inner_boundary
v_outer = v_outer[inner_boundary_index:outer_boundary_index]
v_outer[-1] = v_outer_boundary
mean_densities = mean_densities[inner_boundary_index:outer_boundary_index]
return v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index
def read_abundances_file(abundance_filename, abundance_filetype, inner_boundary_index=None, outer_boundary_index=None):
"""
    read different abundance file formats
    Parameters
    ----------
    abundance_filename: ~str
        filename or path of the abundance file
    abundance_filetype: ~str
        type of the abundance file
inner_boundary_index: int
index of the inner shell, default None
outer_boundary_index: int
index of the outer shell, default None
"""
file_parsers = {'simple_ascii': read_simple_ascii_abundances,
'artis': read_simple_ascii_abundances}
index, abundances = file_parsers[abundance_filetype](abundance_filename)
if outer_boundary_index is not None:
outer_boundary_index_m1 = outer_boundary_index - 1
else:
outer_boundary_index_m1 = None
index = index[inner_boundary_index:outer_boundary_index]
abundances = abundances.ix[:, slice(inner_boundary_index, outer_boundary_index_m1)]
abundances.columns = np.arange(len(abundances.columns))
return index, abundances
def read_simple_ascii_density(fname):
"""
Reading a density file of the following structure (example; lines starting with a hash will be ignored):
The first density describes the mean density in the center of the model and is not used.
5 s
#index velocity [km/s] density [g/cm^3]
0 1.1e4 1.6e8
1 1.2e4 1.7e8
Parameters
----------
fname: str
filename or path with filename
Returns
-------
time_of_model: ~astropy.units.Quantity
time at which the model is valid
data: ~pandas.DataFrame
data frame containing index, velocity (in km/s) and density
"""
with open(fname) as fh:
time_of_model_string = fh.readline().strip()
time_of_model = parse_quantity(time_of_model_string)
data = recfromtxt(fname, skip_header=1, names=('index', 'velocity', 'density'), dtype=(int, float, float))
velocity = (data['velocity'] * u.km / u.s).to('cm/s')
v_inner, v_outer = velocity[:-1], velocity[1:]
mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]
return time_of_model, data['index'], v_inner, v_outer, mean_density
def read_artis_density(fname):
"""
Reading a density file of the following structure (example; lines starting with a hash will be ignored):
The first density describes the mean density in the center of the model and is not used.
5
#index velocity [km/s] log10(density) [log10(g/cm^3)]
0 1.1e4 1.6e8
1 1.2e4 1.7e8
Parameters
----------
fname: str
filename or path with filename
Returns
-------
time_of_model: ~astropy.units.Quantity
time at which the model is valid
data: ~pandas.DataFrame
data frame containing index, velocity (in km/s) and density
"""
with open(fname) as fh:
        for i, line in enumerate(fh):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['index', 'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = recfromtxt(fname, skip_header=2, usecols=(0, 1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
velocity = u.Quantity(artis_model['velocities'], 'km/s').to('cm/s')
mean_density = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
v_inner, v_outer = velocity[:-1], velocity[1:]
return time_of_model, artis_model['index'], v_inner, v_outer, mean_density
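# Illustrative usage sketch (hypothetical file name; expects the ARTIS layout:
# shell count on the first line, model time in days on the second, then the table):
#     time_of_model, index, v_inner, v_outer, mean_density = \
#         read_artis_density('artis_model.dat')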
def read_simple_ascii_abundances(fname):
"""
Reading an abundance file of the following structure (example; lines starting with hash will be ignored):
The first line of abundances describe the abundances in the center of the model and are not used.
#index element1, element2, ..., element30
0 0.4 0.3, .. 0.2
Parameters
----------
fname: str
filename or path with filename
Returns
-------
index: ~np.ndarray
containing the indices
abundances: ~pandas.DataFrame
data frame containing index, element1 - element30 and columns according to the shells
"""
data = np.loadtxt(fname)
index = data[1:,0].astype(int)
abundances = pd.DataFrame(data[1:,1:].transpose(), index=np.arange(1, data.shape[1]))
return index, abundances
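# Illustrative usage sketch (hypothetical file name):
#     index, abundances = read_simple_ascii_abundances('abund.dat')
#     # rows of 'abundances' are elements 1-30, columns are the shells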
def calculate_density_after_time(densities, time_0, time_explosion):
"""
scale the density from an initial time of the model to the time of the explosion by ^-3
Parameters:
-----------
densities: ~astropy.units.Quantity
densities
time_0: ~astropy.units.Quantity
time of the model
time_explosion: ~astropy.units.Quantity
time to be scaled to
Returns:
--------
scaled_density
"""
return densities * (time_explosion / time_0) ** -3
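# Worked example of the scaling above (density drops as t**-3): with
# time_0 = 2 day and time_explosion = 20 day the factor is (20 / 2) ** -3 = 1e-3,
# i.e. every density is reduced to a thousandth of its initial value.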
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
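# The tests below sweep the full grid of eigen-solver and shortest-path options,
# so a failure identifies the exact (eigen_solver, path_method) combination.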
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
energyPATHWAYS/energyPATHWAYS | energyPATHWAYS/dispatch_maintenance.py | 1 | 7610 |
from pyomo.environ import *
import numpy as np
import util
import config as cfg
import pdb
import pandas as pd
import copy
import dispatch_budget
import logging
def surplus_capacity(model):
return model.surplus_capacity + model.peak_penalty * model.weight_on_peak_penalty
def define_penalty_to_preference_high_cost_gen_maint_during_peak(model):
    # If forced to choose between high-cost or low-cost generation being on maintenance when load is high,
    # we would rather have the high-cost generation on maintenance; this should lower production cost overall and make maintenance schedules less random.
return model.peak_penalty == sum([sum([model.marginal_costs[g]*model.max_load_by_group[i]*model.scheduled_maintenance[i, g] for g in model.g])
for i in model.i])
def feasible_maintenance_constraint_0(model, i, g):
return model.scheduled_maintenance[i, g] >= 0
def feasible_maintenance_constraint_1(model, i, g):
return model.scheduled_maintenance[i, g] <= 1
def define_available_gen(model, i):
return model.available_gen[i] == sum([(1 - model.scheduled_maintenance[i, g]) * model.pmax[g] for g in model.g])
def meet_maintenance_constraint(model, g):
# average maintenance across the hours == annual maintenance rate
return sum([model.scheduled_maintenance[i, g] * model.group_lengths[i] for i in model.i]) == model.annual_maintenace_hours[g]
def define_surplus_capacity(model, i):
return model.surplus_capacity >= model.available_gen[i] - model.max_load_by_group[i]
def scale_load_to_system(load, pmaxs, typical_reserve=1.15):
max_load = load.max()
sum_cap = sum(pmaxs)
if (max_load * typical_reserve) > sum_cap:
assert max_load != 0
load2 = load * (sum_cap / (max_load * typical_reserve))
return load2
else:
return load
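# Worked example for scale_load_to_system: with max(load) = 100, sum(pmaxs) = 105
# and the default typical_reserve = 1.15, the target 100 * 1.15 = 115 exceeds the
# 105 of installed capacity, so the whole profile is scaled by 105 / 115 ~ 0.913.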
def schedule_generator_maintenance(load, pmaxs, annual_maintenance_rates, dispatch_periods, marginal_costs, print_opt=False):
# annual maintenance rates must be between zero and one
annual_maintenance_rates = np.clip(annual_maintenance_rates, 0, 1)
# gives the index for the change between dispatch_periods
group_cuts = list(np.where(np.diff(dispatch_periods) != 0)[0] + 1) if dispatch_periods is not None else None
group_lengths = np.array([group_cuts[0]] + list(np.diff(group_cuts)) + [len(load) - group_cuts[-1]])
num_groups = len(group_cuts) + 1
# necessary to scale load in some cases for the optimization to work. Basically, load shouldn't be > gen
load_scaled = scale_load_to_system(load, pmaxs)
max_load_by_group = np.array([np.max(ls) for ls in np.array_split(load_scaled, np.array(group_cuts))])
annual_maintenace_hours = annual_maintenance_rates*len(load)
pmaxs_zero = np.nonzero(pmaxs==0)[0]
pmaxs_not_zero = np.nonzero(pmaxs)[0]
estimated_peak_penalty = sum(sum(np.outer(marginal_costs[pmaxs_not_zero],max_load_by_group).T*annual_maintenance_rates[pmaxs_not_zero]))
estimated_surplus_capacity = (pmaxs.sum() - max_load_by_group.min())*(1-annual_maintenance_rates.mean())
weight_on_peak_penalty = estimated_surplus_capacity/estimated_peak_penalty/10.
model = ConcreteModel()
# INPUT PARAMS
model.i = RangeSet(0, num_groups - 1)
model.g = RangeSet(0, len(pmaxs_not_zero) - 1)
model.annual_maintenace_hours = Param(model.g, initialize=dict(zip(model.g.keys(), annual_maintenace_hours[pmaxs_not_zero])))
model.pmax = Param(model.g, initialize=dict(zip(model.g.keys(), pmaxs[pmaxs_not_zero])))
model.marginal_costs = Param(model.g, initialize=dict(zip(model.g.keys(), marginal_costs[pmaxs_not_zero])))
model.max_load_by_group = Param(model.i, initialize=dict(zip(model.i.keys(), max_load_by_group)))
model.group_lengths = Param(model.i, initialize=dict(zip(model.i.keys(), group_lengths)))
model.weight_on_peak_penalty = Param(default=weight_on_peak_penalty)
    # DECISION VARIABLES
model.available_gen = Var(model.i, within=NonNegativeReals)
model.scheduled_maintenance = Var(model.i, model.g, within=NonNegativeReals)
model.surplus_capacity = Var(within=NonNegativeReals)
model.peak_penalty = Var(within=NonNegativeReals)
# CONSTRAINTS
model.define_available_gen = Constraint(model.i, rule=define_available_gen)
model.feasible_maintenance_constraint_0 = Constraint(model.i, model.g, rule=feasible_maintenance_constraint_0)
model.feasible_maintenance_constraint_1 = Constraint(model.i, model.g, rule=feasible_maintenance_constraint_1)
model.meet_maintenance_constraint = Constraint(model.g, rule=meet_maintenance_constraint)
model.define_surplus_capacity = Constraint(model.i, rule=define_surplus_capacity)
model.define_penalty_to_preference_high_cost_gen_maint_during_peak = Constraint(rule=define_penalty_to_preference_high_cost_gen_maint_during_peak)
# OBJECTIVE
model.objective = Objective(rule=surplus_capacity, sense=minimize)
# SOLVE AND EXPORT RESULTS
solver = SolverFactory(cfg.solver_name or "cbc") # use cbc by default for testing, when you import config in a test, solver_name is None
results = solver.solve(model, tee=print_opt)
model.solutions.load_from(results)
scheduled_maintenance = np.empty((num_groups, len(pmaxs)))
scheduled_maintenance[:, pmaxs_zero] = annual_maintenance_rates[pmaxs_zero]
scheduled_maintenance[:, pmaxs_not_zero] = np.array([[model.scheduled_maintenance[i, g].value for i in model.i.keys()] for g in model.g.keys()]).T
return scheduled_maintenance
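# Illustrative usage sketch (synthetic numbers; a Pyomo-compatible solver such as
# cbc must be installed, per the cfg.solver_name fallback above):
#     load = np.random.uniform(50, 100, 8760)       # hourly load for one year
#     pmaxs = np.array([40.0, 40.0, 30.0])          # unit capacities
#     rates = np.array([0.05, 0.08, 0.10])          # annual maintenance rates
#     periods = np.repeat(np.arange(12), 730)       # 12 dispatch periods
#     mc = np.array([20.0, 35.0, 60.0])             # marginal costs
#     maint = schedule_generator_maintenance(load, pmaxs, rates, periods, mc)
#     # maint has shape (n_periods, n_generators) with entries in [0, 1]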
def schedule_generator_maintenance_loop(load, pmaxs, annual_maintenance_rates, dispatch_periods, scheduling_order):
# if nothing else, better to schedule the large generators first
scheduling_order = np.argsort(-pmaxs) if scheduling_order is None else scheduling_order
# annual maintenance rates must be between zero and one
annual_maintenance_rates = np.clip(annual_maintenance_rates, 0, 1)
# gives the index for the change between dispatch_periods
group_cuts = list(np.where(np.diff(dispatch_periods) != 0)[0] + 1) if dispatch_periods is not None else None
group_lengths = np.array([group_cuts[0]] + list(np.diff(group_cuts)) + [len(load) - group_cuts[-1]])
num_groups = len(group_cuts) + 1
# necessary to scale load in some cases for the optimization to work. Basically, load shouldn't be > gen
load_scaled = scale_load_to_system(load, pmaxs)
load_scaled = np.concatenate([[np.max(ls)]*gl for gl, ls in zip(group_lengths, np.array_split(load_scaled, np.array(group_cuts)))])
pmaxs_clipped = copy.deepcopy(pmaxs)
pmaxs_clipped = np.clip(pmaxs_clipped, 1e-1, None)
maintenance_energy = annual_maintenance_rates*pmaxs_clipped*len(load)
scheduled_maintenance = np.zeros((num_groups, len(pmaxs)))
# loop through and schedule maintenance for each generator one at a time. Update the net load after each one.
for i in scheduling_order:
energy_allocation = dispatch_budget.dispatch_to_energy_budget(load_scaled, -maintenance_energy[i], pmins=0, pmaxs=pmaxs_clipped[i])
scheduled_maintenance[:, i] = np.clip(np.array([np.mean(ls) for ls in np.array_split(energy_allocation, np.array(group_cuts))])/pmaxs_clipped[i], 0, 1)
load_scaled += np.concatenate([[sm * pmaxs[i]]*gl for gl, sm in zip(group_lengths, scheduled_maintenance[:, i])])
if not all(np.isclose(annual_maintenance_rates, (scheduled_maintenance.T * group_lengths).sum(axis=1)/len(load))):
        logging.warning("scheduled maintenance rates don't all match the annual maintenance rates")
return scheduled_maintenance | mit |
GermanRuizMarcos/Classical-Composer-Classification | code_10_1/classification.py | 1 | 30838 | '''
AUDIO CLASSICAL COMPOSER IDENTIFICATION BASED ON:
A SPECTRAL BANDWISE FEATURE-BASED SYSTEM
'''
import essentia
from essentia.standard import *
import glob
import numpy as np
import arff
from scipy import stats
import collections
import cv2
import matplotlib
import matplotlib.pyplot as plt
#### gabor filters
def build_filters():
filters = []
ksize = 31
for theta in np.arange(0, np.pi, np.pi / 16):
kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
def process(img, filters):
accum = np.zeros_like(img)
for kern in filters:
fimg = cv2.filter2D(img, cv2.CV_8UC3, kern)
np.maximum(accum, fimg, accum)
return accum
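# The two helpers above build a bank of 16 Gabor kernels (orientations spaced
# pi/16 apart) and keep, per pixel, the maximum filter response; statistics of
# that texture image feed the gaborfilter-mean / gaborfilter-variance attributes
# written to the ARFF file below.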
###
# Dataset creation with specific attributes (spectral features) and a specific class (composer's name)
'''
Audio files transformed into the frequency domain through a 1024-sample STFT with 50% overlap.
The spectrum is divided into 50 mel-spaced bands.
'''
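# Frame arithmetic implied above: frameSize = 1024 with hopSize = 512 is exactly
# 50% overlap; assuming 44.1 kHz audio (not stated in the script) that is about
# 44100 / 512 ~ 86 analysis frames per second.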
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/bach/*.wav")
fft = FFT()
melbands = MelBands(numberBands = 50)
flatness = FlatnessDB()
rolloff = RollOff()
centroid = SpectralCentroidTime()
flux = Flux()
energy = EnergyBand()
zero = ZeroCrossingRate()
spectrum = Spectrum()
w = Windowing(type = 'hann')
mfcc = MFCC()
silence = SilenceRate(thresholds = [0.01])
f = open('definitive_train.txt', 'wb')
f.write('@RELATION "composer dataset"\n')
f.write('\n')
f.write('@ATTRIBUTE filename STRING\n')
f.write('@ATTRIBUTE MFCC-0 REAL\n')
f.write('@ATTRIBUTE MFCC-1 REAL\n')
f.write('@ATTRIBUTE MFCC-2 REAL\n')
f.write('@ATTRIBUTE MFCC-3 REAL\n')
f.write('@ATTRIBUTE MFCC-4 REAL\n')
f.write('@ATTRIBUTE MFCC-5 REAL\n')
f.write('@ATTRIBUTE MFCC-6 REAL\n')
f.write('@ATTRIBUTE MFCC-7 REAL\n')
f.write('@ATTRIBUTE MFCC-8 REAL\n')
f.write('@ATTRIBUTE MFCC-9 REAL\n')
f.write('@ATTRIBUTE MFCC-10 REAL\n')
f.write('@ATTRIBUTE MFCC-11 REAL\n')
f.write('@ATTRIBUTE MFCC-12 REAL\n')
f.write('@ATTRIBUTE flatness-mean REAL\n')
f.write('@ATTRIBUTE flatness-variance REAL\n')
f.write('@ATTRIBUTE rolloff-mean REAL\n')
f.write('@ATTRIBUTE rolloff-variance REAL\n')
f.write('@ATTRIBUTE centroid-mean REAL\n')
f.write('@ATTRIBUTE centroid-variance REAL\n')
f.write('@ATTRIBUTE flux-mean REAL\n')
f.write('@ATTRIBUTE flux-variance REAL\n')
f.write('@ATTRIBUTE energy-mean REAL\n')
f.write('@ATTRIBUTE energy-variance REAL\n')
f.write('@ATTRIBUTE ZCR-mean REAL\n')
f.write('@ATTRIBUTE ZCR-variance REAL\n')
f.write('@ATTRIBUTE flatness-std REAL\n')
f.write('@ATTRIBUTE flatness-hmean REAL\n')
f.write('@ATTRIBUTE silences REAL\n')
f.write('@ATTRIBUTE gaborfilter-mean REAL\n')
f.write('@ATTRIBUTE gaborfilter-variance REAL\n')
f.write('@ATTRIBUTE composer {bach, beethoven, chopin, haydn, liszt, mendelssohn, mozart, vivaldi}\n')
f.write('\n')
f.write('@DATA\n')
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/bach'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'bach'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('bach')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 2
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/beethoven/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/beethoven'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'beethoven'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('beethoven')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 3
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/chopin/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/chopin'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'chopin'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('chopin')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 4
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/haydn/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/haydn'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'haydn'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('haydn')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
'''
# 5
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/liszt/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/liszt'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'liszt'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
'''
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('liszt')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 6
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/mendelssohn/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/mendelssohn'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'mendelssohn'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('mendelssohn')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 7
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/mozart/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/mozart'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'mozart'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('mozart')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
# 8
dirList = glob.glob("/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/datasets/vivaldi/*.wav")
dirimg = '/home/usuario/Escritorio/SMC/2 term/Music Information Retrieval/Classical Composer Identification/code_10/pictures/vivaldi'
dirname = str(dirimg) +'/*.png'
piclist = glob.glob(dirname)
counter = 0
for audio_file in dirList:
    # Selecting the spectrogram
for item in piclist:
if item.split('/')[-1].split('.')[0] == audio_file.split('/')[-1].split('.')[0]:
picname = str(dirimg)+'/'+str(audio_file.split('/')[-1].split('.')[0]) + '.png'
flat = []
rol = []
cen = []
flu = []
ene = []
zer = []
mfccs = []
stft = []
sil = []
mean_counter = []
# Loading audio
audio = MonoLoader(filename = audio_file)()
# Features extraction
for frame in FrameGenerator(audio, frameSize = 1024, hopSize = 512, startFromZero=True):
bands = melbands(spectrum(frame))
stft.append(fft(frame))
flat.append(flatness(bands))
rol.append(rolloff(bands))
cen.append(centroid(bands))
flu.append(flux(bands))
ene.append(energy(bands))
zer.append(zero(frame))
mfcc_bands, mfcc_coeffs = mfcc(spectrum(w(frame)))
mfccs.append(mfcc_coeffs)
sil.append(silence(frame))
rate = collections.Counter()
rate.update(sil)
rate = rate.most_common(1)
composer = 'vivaldi'
# Gabor filter analysis
if __name__ == '__main__':
import sys
print __doc__
try:
img_fn = sys.argv[1]
except:
img_fn = picname
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
filters = build_filters()
res1 = process(img, filters)
for i in range(len(res1)-1):
for j in range(len(res1[i])-1):
mean_counter.append(np.mean(res1[i][j]))
f.write('%s' %audio_file.split('/')[-1].split('.')[0].split('vivaldi')[0])
f.write(',')
f.write('%r' %np.mean(mfccs[0]))
f.write(',')
f.write('%r' %np.mean(mfccs[1]))
f.write(',')
f.write('%r' %np.mean(mfccs[2]))
f.write(',')
f.write('%r' %np.mean(mfccs[3]))
f.write(',')
f.write('%r' %np.mean(mfccs[4]))
f.write(',')
f.write('%r' %np.mean(mfccs[5]))
f.write(',')
f.write('%r' %np.mean(mfccs[6]))
f.write(',')
f.write('%r' %np.mean(mfccs[7]))
f.write(',')
f.write('%r' %np.mean(mfccs[8]))
f.write(',')
f.write('%r' %np.mean(mfccs[9]))
f.write(',')
f.write('%r' %np.mean(mfccs[10]))
f.write(',')
f.write('%r' %np.mean(mfccs[11]))
f.write(',')
f.write('%r' %np.mean(mfccs[12]))
f.write(',')
f.write('%r' %np.mean(flat))
f.write(',')
f.write('%r' %np.var(flat))
f.write(',')
f.write('%r' %np.mean(rol))
f.write(',')
f.write('%r' %np.var(rol))
f.write(',')
f.write('%r' %np.mean(cen))
f.write(',')
f.write('%r' %np.var(cen))
f.write(',')
f.write('%r' %np.mean(flu))
f.write(',')
f.write('%r' %np.var(flu))
f.write(',')
f.write('%r' %np.mean(ene))
f.write(',')
f.write('%r' %np.var(ene))
f.write(',')
f.write('%r' %np.mean(zer))
f.write(',')
f.write('%r' %np.var(zer))
f.write(',')
f.write('%r' %np.std(flat))
f.write(',')
f.write('%r' %stats.hmean(flat))
f.write(',')
f.write('%r' %rate[0][1])
f.write(',')
f.write('%r' %np.var(mean_counter))
f.write(',')
f.write('%r' %np.std(mean_counter))
f.write(',')
f.write('%s' %composer)
f.write('\n')
counter += 1
f.write('%\n')
f.write('%\n')
f.write('%\n')
f.close()
| gpl-3.0 |
sgenoud/scikit-learn | sklearn/mixture/tests/test_gmm.py | 3 | 12260 | import itertools
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal, \
assert_raises
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
rng = np.random.RandomState(0)
def test_sample_gaussian():
"""
Test sample generation from mixture.sample_gaussian where covariance
is diagonal, spherical and full
"""
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert np.allclose(samples.mean(axis), mu, atol=1.3)
assert np.allclose(samples.var(axis), cv, atol=1.5)
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert np.allclose(samples.mean(axis), mu, atol=1.5)
assert np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5)
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert np.allclose(samples.mean(axis), mu, atol=1.3)
assert np.allclose(np.cov(samples), cv, atol=2.5)
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(itertools.izip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
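# Reference formula implemented above: with a diagonal covariance the log-density
# factorises over dimensions,
#     log N(x | mu_k, diag(cv_k)) = sum_d log N(x_d | mu_kd, sigma_kd),
# where sigma_kd = sqrt(cv_kd); the stats.norm.pdf call evaluates each dimension
# and the sum over axis=1 adds up the per-dimension log terms.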
def test_lmvnpdf_diag():
"""
test a slow and naive implementation of lmvnpdf and
compare it to the vectorized version (mixture.lmvnpdf) to test
for correctness
"""
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert g.n_components == n_components
assert g.covariance_type == covariance_type
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {'spherical': (0.1 + 2 * \
rng.rand(self.n_components, self.n_features)) ** 2,
'tied': make_spd_matrix(self.n_features, random_state=0) +\
5 * self.I,
'diag': (0.1 + 2 * rng.rand(self.n_components,\
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features,\
random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(range(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.eval(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEquals(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for iter in xrange(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
""" Train on degenerate data with 0 in some dimensions
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
""" Train on 1-D data
"""
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
#X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
"""Test that multiple inits does not much worse than a single one"""
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert train2 >= train1 - 1.e-2
def test_n_parameters():
"""Test that the right number of parameters is estimated"""
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert g._n_parameters() == n_params[cv_type]
def test_aic():
""" Test the aic and bic criteria"""
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert np.abs(g.aic(X) - aic) / n_samples < bound
assert np.abs(g.bic(X) - bic) / n_samples < bound
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
vickyting0910/opengeocoding | 2reinter.py | 1 | 3991 | import pandas as pd
import glob
import time
import numpy as num
inter=sorted(glob.glob('*****.csv'))
w='*****.xlsx'
table1=pd.read_excel(w, '*****', index_col=None, na_values=['NA']).fillna(0)
w='*****.csv'
tab=pd.read_csv(w).fillna(0)
tab.is_copy = False
pd.options.mode.chained_assignment = None
t1=time.time()
for i in range(len(tab)):
if tab["IBR"][i]=='9A' or tab["IBR"][i] == '9B' or tab["IBR"][i] == '09A' or tab["IBR"][i] == '09B':
tab["IBR"][i]='9'
if tab["IBR"][i]=='11A' or tab["IBR"][i] == '11B' or tab["IBR"][i]=='11C' or tab["IBR"][i] == '11D' or tab["IBR"][i]=='36B':
tab["IBR"][i]='11'
if tab["IBR"][i]=='36A' or tab["IBR"][i] == '36B':
tab["IBR"][i]='36'
if tab["IBR"][i]=='13A' or tab["IBR"][i] == '13B' or tab["IBR"][i] == '13C':
tab["IBR"][i]='13'
if tab["IBR"][i]=='23A' or tab["IBR"][i] == '23B' or tab["IBR"][i] == '23E' or tab["IBR"][i] == '23F' or tab["IBR"][i] == '23H':
tab["IBR"][i]='23'
if tab["IBR"][i]=='26A' or tab["IBR"][i] == '26B' or tab["IBR"][i] == '26C' or tab["IBR"][i] == '26D' or tab["IBR"][i] == '26E':
tab["IBR"][i]='26'
if tab["IBR"][i]=='35A' or tab["IBR"][i] == '35B':
tab["IBR"][i]='35'
if tab["IBR"][i]=='36A':
tab["IBR"][i]='36'
if tab["IBR"][i]=='39A' or tab["IBR"][i] == '39B' or tab["IBR"][i] == '39C' or tab["IBR"][i] == '39D':
tab["IBR"][i]='39'
if tab["IBR"][i]=='40A' or tab["IBR"][i] == '40B' or tab["IBR"][i] == '40C':
tab["IBR"][i]='40'
if tab["IBR"][i]=='64A' or tab["IBR"][i] == '64B':
tab["IBR"][i]='64'
if tab["IBR"][i]=='90A' or tab["IBR"][i] == '90B' or tab["IBR"][i] == '90C' or tab["IBR"][i] == '90H' or tab["IBR"][i] == '90F' or tab["IBR"][i] == '90G' or tab["IBR"][i]=='90J' or tab["IBR"][i]=='90Z':
tab["IBR"][i]='90'
#convert to string for the join
for i in range(len(table1)):
table1['IBR_code'][i]=str(table1['IBR_code'][i])
description=table1.set_index([ "IBR_code"])
t2=time.time()
print t2-t1
#index crime
tab["index"]=num.nan
for i in range(len(tab)): #convert to integer
tab["index"][i]=tab.index[i]+1
#join
tab=tab.join(description, on=["IBR"], sort=True, rsuffix='_1', how='outer').fillna(0)
tab=tab[(tab["Reported_address"] != 0)].reset_index(drop=True).fillna(0)
tab["IBR_description"]=tab["crime_des12"]
t3=time.time()
print t3-t2
tab=tab[["Global_ID","Reported_address","Incident_date","Incident_time","Report_date","Report_time","Latitude","Longitude","IBR","IBR_description","Police_Department_Code","PD_description","State_Statute_Literal","State_Statute_Number","flag_geocode",'Fdir_n1','Edir_n1','strname_n1','strtype_n1','Enum_n1','Fdir_n2','Edir_n2','strname_n2','strtype_n2','Enum_n2','comname','mroad1','mratio1','wcorr1','wratio1','mroad2','mratio2','wcorr2','wratio2','match']]
tab=tab.replace("",num.nan)
tab=tab.replace("0",num.nan)
tab=tab.replace("00",num.nan)
tab=tab.replace(0,num.nan)
tab.to_csv('*****.csv',index=False)
for i in range(len(tab)):
tab['Global_ID'][i]=str(tab['Global_ID'][i])
description=tab.set_index([ "Global_ID"])
name1=[i[i.find('inter'):i.rfind('C.csv')+1].replace('_matchgeo','') for i in inter]
for p, q in zip((inter), (name1)):
table1=pd.read_csv(p)
for i in range(len(table1)):
tab['Global_ID'][i]=str(tab['Global_ID'][i])
table1=table1.join(description, on=["Global_ID"], sort=True, rsuffix='_1', how='outer').fillna(0)
table1=table1[(table1["Reported_address"] != 0)].reset_index(drop=True).fillna(0)
table1["IBR_description"]=table1["IBR_description_1"]
table1["IBR"]=table1["IBR_1"]
table1=table1[["Global_ID","Reported_address","Incident_date","Incident_time","Report_date","Report_time","Latitude","Longitude","IBR","IBR_description","Police_Department_Code","PD_description","State_Statute_Literal","State_Statute_Number","flag_geocode",'Fdir_n1','Edir_n1','strname_n1','strtype_n1','Enum_n1','Fdir_n2','Edir_n2','strname_n2','strtype_n2','Enum_n2','comname','mroad1','mratio1','wcorr1','wratio1','mroad2','mratio2','wcorr2','wratio2','match']]
table1.to_csv('*****.csv',index=False)
| bsd-2-clause |
waynenilsen/statsmodels | examples/python/robust_models_0.py | 33 | 2992 |
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
| bsd-3-clause |
rvbelefonte/Rockfish2 | rockfish2/extensions/cps/model.py | 1 | 3390 | """
Tools for working with Computer Programs in Seismology velocity models
"""
import os
import numpy as np
import datetime
import pandas as pd
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from rockfish2 import logging
from rockfish2.models.profile import Profile
class CPSModel1d(Profile):
def __init__(self, *args, **kwargs):
self.NAME = kwargs.pop('name', '1D model')
self.UNITS = kwargs.pop('units', 'KGS')
self.ISOTROPY = kwargs.pop('isotropy', 'ISOTROPIC')
self.SHAPE = kwargs.pop('shape', 'FLAT EARTH')
self.DIM = kwargs.pop('dim', '1-D')
Profile.__init__(self, *args, **kwargs)
def __str__(self):
return self.write()
def write(self, path_or_buf=None, float_format='%10.6f', **kwargs):
"""
Write profile to the Computer Programs in Seismology model format
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
"""
model = self.model.copy()
col = ['hr'] + [k for k in model if k != 'hr']
model['hr'] = np.concatenate((np.diff(np.asarray(model.index)), [0.0]))
model.index = np.arange(len(model))
#model = model[0:len(model) - 1]
sng = "MODEL\n"
sng += "{:}\n".format(self.NAME)
sng += "{:}\n".format(self.ISOTROPY)
sng += "{:}\n".format(self.UNITS)
sng += "{:}\n".format(self.SHAPE)
sng += "{:}\n".format(self.DIM)
sng += "CONSTANT VELOCITY\n"
sng += "#\n"
sng += "Created by: {:}{:}\n"\
.format(self.__module__, self.__class__.__name__)
sng += "Created on: {:}\n".format(datetime.datetime.now())
sng += "#\n"
sng += model[col].to_csv(sep='\t', index=False,
float_format=float_format, **kwargs)
if path_or_buf is None:
return sng
if hasattr(path_or_buf, 'write'):
path_or_buf.write(sng)
else:
            with open(path_or_buf, 'w') as f:
                f.write(sng)
def read(self, filename, sep='\t'):
"""
        Read a profile from the Computer Programs in Seismology model format
"""
f = open(filename, 'rb')
kind = f.readline().replace('\n', '')
assert kind.startswith('MODEL'),\
'File does not appear to be CPS format'
self.NAME = f.readline().replace('\n', '')
self.ISOTROPY = f.readline().replace('\n', '')
self.UNITS = f.readline().replace('\n', '')
self.SHAPE = f.readline().replace('\n', '')
self.DIM = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
_ = f.readline().replace('\n', '')
cols = f.readline().replace('\n', '').split()
self.model = pd.read_csv(filename, sep=sep, skiprows=11,
index_col=0)
try:
dz = self.model.index[:]
z = np.cumsum(np.asarray(dz)) - dz[0]
if z[-1] == 0:
z[-1] = dz[-2]
self.model.index = z
self.model.index.name = 'depth'
except:
pass
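# Minimal usage sketch (illustrative only; assumes the Profile base class can
# be constructed without positional arguments and that 'example_model.d' is a
# CPS-format file -- neither is shown in this module):
#
#     model = CPSModel1d(name='example model')
#     model.read('example_model.d')      # parse a CPS-format velocity model
#     print(model.write())               # dump it back as CPS-formatted text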
| gpl-2.0 |
qiwsir/vincent | examples/map_examples.py | 11 | 6721 | # -*- coding: utf-8 -*-
"""
Vincent Map Examples
"""
#Build a map from scratch
from vincent import *
world_topo = r'world-countries.topo.json'
state_topo = r'us_states.topo.json'
lake_topo = r'lakes_50m.topo.json'
county_geo = r'us_counties.geo.json'
county_topo = r'us_counties.topo.json'
or_topo = r'or_counties.topo.json'
vis = Visualization(width=960, height=500)
vis.data['countries'] = Data(
name='countries',
url=world_topo,
format={'type': 'topojson', 'feature': 'world-countries'}
)
geo_transform = Transform(
type='geopath', value="data", projection='winkel3', scale=200,
translate=[480, 250]
)
geo_from = MarkRef(data='countries', transform=[geo_transform])
enter_props = PropertySet(
stroke=ValueRef(value='#000000'),
path=ValueRef(field='path')
)
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark_props = MarkProperties(enter=enter_props, update=update_props)
vis.marks.append(
Mark(type='path', from_=geo_from, properties=mark_props)
)
vis.to_json('vega.json')
#Convenience Method
geo_data = [{'name': 'countries',
'url': world_topo,
'feature': 'world-countries'}]
vis = Map(geo_data=geo_data, scale=200)
vis.to_json('vega.json')
#States & Counties
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'},
{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}
]
vis = Map(geo_data=geo_data, scale=1000, projection='albersUsa')
del vis.marks[1].properties.update
vis.marks[0].properties.update.fill.value = '#084081'
vis.marks[1].properties.enter.stroke.value = '#fff'
vis.marks[0].properties.enter.stroke.value = '#7bccc4'
vis.to_json('vega.json')
#Choropleth
import json
import pandas as pd
#Map the county codes we have in our geometry to those in the
#county_data file, which contains additional rows we don't need
with open('us_counties.topo.json', 'r') as f:
get_id = json.load(f)
#A little FIPS code munging
new_geoms = []
for geom in get_id['objects']['us_counties.geo']['geometries']:
geom['properties']['FIPS'] = int(geom['properties']['FIPS'])
new_geoms.append(geom)
get_id['objects']['us_counties.geo']['geometries'] = new_geoms
with open('us_counties.topo.json', 'w') as f:
json.dump(get_id, f)
#Grab the FIPS codes and load them into a dataframe
geometries = get_id['objects']['us_counties.geo']['geometries']
county_codes = [x['properties']['FIPS'] for x in geometries]
county_df = pd.DataFrame({'FIPS': county_codes}, dtype=str)
county_df = county_df.astype(int)
#Read into Dataframe, cast to int for consistency
df = pd.read_csv('data/us_county_data.csv', na_values=[' '])
df['FIPS'] = df['FIPS'].astype(int)
#Perform an inner join, pad NA's with data from nearest county
merged = pd.merge(df, county_df, on='FIPS', how='inner')
merged = merged.fillna(method='pad')
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'}]
vis = Map(data=merged, geo_data=geo_data, scale=1100, projection='albersUsa',
data_bind='Employed_2011', data_key='FIPS',
map_key={'counties': 'properties.FIPS'})
vis.marks[0].properties.enter.stroke_opacity = ValueRef(value=0.5)
#Change our domain to an even integer
vis.scales['color'].domain = [0, 189000]
vis.legend(title='Number Employed 2011')
vis.to_json('vega.json')
#Lets look at different stats
vis.rebind(column='Civilian_labor_force_2011', brew='BuPu')
vis.to_json('vega.json')
vis.rebind(column='Unemployed_2011', brew='PuBu')
vis.to_json('vega.json')
vis.rebind(column='Unemployment_rate_2011', brew='YlGnBu')
vis.to_json('vega.json')
vis.rebind(column='Median_Household_Income_2011', brew='RdPu')
vis.to_json('vega.json')
#Mapping US State Level Data
state_data = pd.read_csv('data/US_Unemployment_Oct2012.csv')
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = Map(data=state_data, geo_data=geo_data, scale=1000,
projection='albersUsa', data_bind='Unemployment', data_key='NAME',
map_key={'states': 'properties.NAME'})
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')
#Iterating State Level Data
yoy = pd.read_table('data/State_Unemp_YoY.txt', delim_whitespace=True)
#Standardize State names to match TopoJSON for keying
names = []
for row in yoy.iterrows():
pieces = row[1]['NAME'].split('_')
together = ' '.join(pieces)
names.append(together.title())
yoy['NAME'] = names
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = Map(data=yoy, geo_data=geo_data, scale=1000,
projection='albersUsa', data_bind='AUG_2012', data_key='NAME',
map_key={'states': 'properties.NAME'}, brew='YlGnBu')
#Custom threshold scale
vis.scales[0].type='threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')
#Rebind and set our scale again
vis.rebind(column='AUG_2013', brew='YlGnBu')
vis.scales[0].type='threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.to_json('vega.json')
vis.rebind(column='CHANGE', brew='YlGnBu')
vis.scales[0].type='threshold'
vis.scales[0].domain = [-1.5, -1.3, -1.1, 0, 0.1, 0.3, 0.5, 0.8]
vis.legends[0].title = "YoY Change in Unemployment (%)"
vis.to_json('vega.json')
#Oregon County-level population data
or_data = pd.read_table('data/OR_County_Data.txt', delim_whitespace=True)
or_data['July_2012_Pop']= or_data['July_2012_Pop'].astype(int)
#Standardize keys
with open('or_counties.topo.json', 'r') as f:
counties = json.load(f)
def split_county(name):
parts = name.split(' ')
parts.pop(-1)
return ''.join(parts).upper()
#A little FIPS code munging
new_geoms = []
for geom in counties['objects']['or_counties.geo']['geometries']:
geom['properties']['COUNTY'] = split_county(geom['properties']['COUNTY'])
new_geoms.append(geom)
counties['objects']['or_counties.geo']['geometries'] = new_geoms
with open('or_counties.topo.json', 'w') as f:
json.dump(counties, f)
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'},
{'name': 'or_counties',
'url': or_topo,
'feature': 'or_counties.geo'}]
vis = Map(data=or_data, geo_data=geo_data, scale=3700,
translate=[1480, 830],
projection='albersUsa', data_bind='July_2012_Pop', data_key='NAME',
map_key={'or_counties': 'properties.COUNTY'})
vis.marks[0].properties.update.fill.value = '#c2c2c2'
vis.to_json('vega.json')
| mit |
AlvinPH/StockTool | StockTool/core.py | 1 | 7480 |
from . import helpers
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
from pandas_datareader import data
from datetime import datetime, timedelta
import re
import os
import requests
import time
import logging  # needed for the logging.error calls in the fetch methods below
class StockInfo():
def __init__(self, StockNumber):
if isinstance(StockNumber, str) is False:
print('StockNumber must be string')
self.__StockNumber = '2330.TW'
else:
self.__StockNumber = StockNumber+'.TW'
def get_StockNumber(self):
return self.__StockNumber
def fetch_StockPrice(self, StartTime, EndTime):
# self.__StockPrice = data.DataReader(self.__StockNumber,
# 'yahoo',StartTime, EndTime)
self.__StockPrice = data.DataReader(self.__StockNumber,
'yahoo',StartTime, EndTime)
def get_StockPrice(self):
return self.__StockPrice
def fetch_StockActions(self, StartTime, EndTime):
self.__StockActions = data.DataReader(self.__StockNumber,
'yahoo-actions',StartTime, EndTime)
def get_StockActions(self):
return self.__StockActions
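# Illustrative usage sketch (not part of the original module; the ticker and
# date range below are arbitrary examples):
#
#     info = StockInfo('2330')
#     info.fetch_StockPrice(datetime(2016, 1, 1), datetime(2016, 12, 31))
#     prices = info.get_StockPrice()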
class Crawler():
def __init__(self, prefix='data'):
if not os.path.isdir(prefix):
os.mkdir(prefix)
self.prefix = prefix
# pass
def get_tse_one_day(self, spec_date):
date_str = '{0}{1:02d}{2:02d}'.format(spec_date.year, spec_date.month, spec_date.day)
url = 'http://www.twse.com.tw/exchangeReport/MI_INDEX'
query_params = {
'date': date_str,
'response': 'json',
'type': 'ALL',
'_': str(round(time.time() * 1000) - 500)
}
# Get json data
page = requests.get(url, params=query_params)
if not page.ok:
logging.error("Can not get TSE data at {}".format(date_str))
content = page.json()
# print(content)
# key = 'Nodata'
isoffday = True
for key in content.keys():
if isinstance(content[key], list):
if len(content[key][0]) == 16:
isoffday = False
break
if isoffday:
print('No data at this day %4d/%02d/%02d'%
(spec_date.year,spec_date.month, spec_date.day))
return -1
# For compatible with original data
# date_str_mingguo = '{0}/{1:02d}/{2:02d}'.format(spec_date.year - 1911,\
# spec_date.month, spec_date.day)
data_df = DataFrame(data=content[key],
columns=['code','name','volume','transaction','turnover',
'open','high','low','close','UD','difference',
'last_buy', 'last_buy_volume',
'last_sell','last_sell_volume','PE_ratio'])
data_df = data_df.applymap(lambda x: re.sub(",","",x))# clear comma
data_df.replace({'UD':{'<p style= color:red>+</p>':'+',
'<p style= color:green>-</p>':'-'}},
inplace=True)
return data_df
def get_otc_one_day(self, spec_date):
date_str = '{0}/{1:02d}/{2:02d}'.format(spec_date.year-1911, spec_date.month, spec_date.day)
ttime = str(int(time.time()*100))
url = 'http://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_result.php?l=zh-tw&d={}&_={}'.format(date_str, ttime)
page = requests.get(url)
if not page.ok:
logging.error("Can not get OTC data at {}".format(date_str))
# print(page.content)
content = page.json()
# print(content)
# key = 'Nodata'
if (len(content['aaData']) + len(content['mmData'])) == 0:
print('No data at this day ' + date_str)
return -1
data_df = DataFrame(data=content['aaData'] + content['mmData'],
columns=['code','name','close','difference','open',
'high','low','avg','volume','turnover',
'transaction','last_buy',
'last_sell','NumOfShare','NextRefPrice',
'NextUpperPrice', 'NextLowerPrice'])
data_df = data_df.applymap(lambda x: re.sub(",","",x))# clear comma
return data_df
def check_all_tse_data(self):
Filelist = os.listdir(self.prefix)
if 'offday.xlsx' in Filelist:
offday_ser = pd.read_excel(self.prefix + '/offday.xlsx')
offday_ser = offday_ser['date'].copy()
else:
offday_ser = Series(name='date', data='First')
offday_update = False
lastday_update = False
Now = datetime.now()
Nowdate = datetime(Now.year, Now.month, Now.day)
if 'lastday.txt' in Filelist:
with open(self.prefix + '/lastday.txt', 'r') as f:
read_data = f.read()
f.close()
Startdate = datetime(int(read_data[0:4]),
int(read_data[4:6]),
int(read_data[6:8]))
else:
#Start from 2004(093)/02/11
Startdate = datetime(2004, 2, 11)
datediff = timedelta(days=1)
while Startdate <= Nowdate:
date_str = '{0}{1:02d}{2:02d}'.\
format(Startdate.year-1911,Startdate.month, Startdate.day)
print('Read ' + date_str)
if ('%s.xlsx' %(date_str)) not in Filelist:# not in FileList
if (offday_ser != date_str).all():# not a offday
lastday_update = True
data_df = self.get_tse_one_day(Startdate) # collect data
if isinstance(data_df, DataFrame):# success
data_df.to_excel('{0}/{1}.xlsx'.format(self.prefix,date_str))# save data
else:# is an offday, update offday series
offday_ser.set_value( len(offday_ser), date_str)
offday_update = True
                    print(date_str + ' is an offday')
else:
print(date_str + ' is known as an offday')
else:
print(date_str + ' is in FileList')
Startdate = Startdate + datediff
if offday_update:
offday_ser.to_excel(self.prefix + '/offday.xlsx')
if lastday_update:
with open(self.prefix + '/lastday.txt', 'w') as f:
# Nowdate += timedelta(days=-1)
date_str = '{0}{1:02d}{2:02d}'.\
format(Nowdate.year,Nowdate.month, Nowdate.day)
f.write(date_str)
f.close()
def check_all_otc_data(self):
Filelist = os.listdir(self.prefix)
if 'offdayOTC.xlsx' in Filelist:
offday_ser = pd.read_excel(self.prefix + '/offdayOTC.xlsx')
offday_ser = offday_ser['date'].copy()
else:
offday_ser = Series(name='date', data='First')
offday_update = False
lastday_update = False
Now = datetime.now()
Nowdate = datetime(Now.year, Now.month, Now.day)
if 'lastdayOTC.txt' in Filelist:
with open(self.prefix + '/lastdayOTC.txt', 'r') as f:
read_data = f.read()
f.close()
Startdate = datetime(int(read_data[0:4]),
int(read_data[4:6]),
int(read_data[6:8]))
else:
#Start from 2007(096)/04/23
Startdate = datetime(2007, 4, 23)
# Startdate = datetime(2008, 2, 28)
datediff = timedelta(days=1)
while Startdate <= Nowdate:
date_str = '{0}{1:02d}{2:02d}'.\
format(Startdate.year-1911,Startdate.month, Startdate.day)
print('Read ' + date_str + ' OTC')
if ('%sOTC.xlsx' %(date_str)) not in Filelist:# not in FileList
if (offday_ser != date_str).all():# not a offday
lastday_update = True
time.sleep(np.random.random())
data_df = self.get_otc_one_day(Startdate) # collect data
if isinstance(data_df, DataFrame):# success
data_df.to_excel('{0}/{1}OTC.xlsx'.format(self.prefix,date_str))# save data
else:# is an offday, update offday series
offday_ser.set_value( len(offday_ser), date_str)
offday_update = True
                    print(date_str + ' is an offday')
else:
print(date_str + ' is known as an offday')
else:
print(date_str + ' is in FileList')
Startdate = Startdate + datediff
if offday_update:
offday_ser.to_excel(self.prefix + '/offdayOTC.xlsx')
if lastday_update:
with open(self.prefix + '/lastdayOTC.txt', 'w') as f:
# Nowdate += timedelta(days=-1)
date_str = '{0}{1:02d}{2:02d}'.\
format(Nowdate.year,Nowdate.month, Nowdate.day)
f.write(date_str)
f.close()
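# Minimal driver sketch (illustrative; it only chains the public methods
# defined above and assumes the default 'data' output directory):
#
#     if __name__ == '__main__':
#         crawler = Crawler(prefix='data')
#         crawler.check_all_tse_data()   # daily TWSE quotes -> data/<date>.xlsx
#         crawler.check_all_otc_data()   # daily OTC quotes  -> data/<date>OTC.xlsx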
| bsd-2-clause |
CVML/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
As a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the precision-recall
curve (AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it
    # easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Silence the user warning outputs; they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
Aasmi/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that each leaf subcluster has a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
jorik041/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
r-rathi/error-control-coding | perf/plot-pegd.py | 1 | 1496 | import numpy as np
import matplotlib.pyplot as plt
from errsim import *
def label(d, pe, pb, n):
if pb is None:
pb = pe
label = 'd={} pe={} n={} BSC'.format(d, pe, n)
else:
label = 'd={} pe={} n={} pb={}'.format(d, pe, n, pb)
return label
def plot(pe, fpath=None):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
r = np.arange(8, 65)
pWL = jointpmf5(pe, pe, 128)
ax.plot(r, r_vs_pegd(pWL, 3, r) , 'g--', lw=2, label=label(3, pe, None, 128))
ax.plot(r, r_vs_pegd(pWL, 6, r) , 'g-', lw=2, label=label(6, pe, None, 128))
pWL = jointpmf5(pe, .1, 128)
ax.plot(r, r_vs_pegd(pWL, 3, r) , 'b--', lw=2, label=label(3, pe, .1, 128))
ax.plot(r, r_vs_pegd(pWL, 6, r) , 'b-', lw=2, label=label(6, pe, .1, 128))
pWL = jointpmf5(pe, .5, 128)
ax.plot(r, r_vs_pegd(pWL, 3, r) , 'r--', lw=2, label=label(3, pe, .5, 128))
ax.plot(r, r_vs_pegd(pWL, 6, r) , 'r-', lw=2, label=label(6, pe, .5, 128))
ax.set_yscale('log')
ax.set_xticks(r[::8])
ax.set_xlim(r[0], r[-1])
#ax.set_ylim(1e-30, 1e-1)
ax.set_xlabel('Burst error correction capability, $r$')
ax.set_ylabel('$P_{egd}$')
    ax.set_title('Probability of Exceeding Guaranteed Error Detection Capability')
ax.legend(loc='lower right')
ax.grid(True)
#plt.tight_layout()
if fpath:
fig.savefig(fpath)
plt.show()
plt.close('all')
plot(1e-15, 'plots/pegd-pe=1e15.png')
plot(1e-6, 'plots/pegd-pe=1e6.png')
| mit |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dviread.py | 69 | 29920 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor.
Interface::
dvi = Dvi(filename, 72)
for page in dvi: # iterate over pages
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
import numpy as np
import struct
import subprocess
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
def __iter__(self):
"""
Iterate through the pages of the file.
        Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h = _mul2012(font._scale, font._tfm.height[g])
e = _mul2012(font._scale, font._tfm.depth[g])
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=(maxy-maxy_pure)*d)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1))
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument "nbytes" long.
Signedness is determined by the "signed" keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode "byte", read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError, "unknown command: byte %d"%byte
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of dvi file"
if i != 2:
raise ValueError, "Unknown dvi format %d"%i
if num != 25400000 or den != 7227 * 2**16:
raise ValueError, "nonstandard units in dvi file"
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError, "nonstandard magnification in dvi file"
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_char in dvi file"
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_rule in dvi file"
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_char in dvi file"
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_rule in dvi file"
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError, \
"misplaced bop in dvi file (state %d)" % self.state
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced eop in dvi file"
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced push in dvi file"
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced pop in dvi file"
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced right in dvi file"
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced w in dvi file"
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced x in dvi file"
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced down in dvi file"
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced y in dvi file"
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced z in dvi file"
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced fnt_num in dvi file"
self.f = k
def _xxx(self, special):
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:])
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError, 'tfm checksum mismatch: %s'%n
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:])
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError, "misplaced post in dvi file"
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are _not_ used for comparison.
The size is in Adobe points (converted from TeX points).
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
            nchars = max(tfm.width.iterkeys()) + 1  # +1 so the highest char code is included
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in range(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
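# Worked example for DviFont.widths (illustrative numbers, not from a real TFM
# file): a TFM width is a fix_word, i.e. a multiple of 2**-20 of the font size,
# and (1000*width) >> 20 rescales it to thousandths of the font size, the AFM
# convention mentioned in the class docstring.  A glyph half as wide as the
# font size has width 1 << 19, and (1000 * (1 << 19)) >> 20 == 500.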
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError, "Packet length mismatch in vf file"
else:
if byte in (139, 140) or byte >= 243:
raise ValueError, "Inappropriate opcode %d in vf file" % byte
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError, "unknown vf opcode %d" % byte
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError, "Misplaced packet in vf file"
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of vf file"
if i != 202:
raise ValueError, "Unknown vf format %d" % i
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
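# Illustrative values for _fix2comp (chosen here, not read from a dvi file):
#   _fix2comp(0x7fffffff) == 2147483647   # sign bit clear, value unchanged
#   _fix2comp(0xffffffff) == -1           # sign bit set, wrapped to negative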
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
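# Illustrative values for _mul2012 (made-up operands): reading the integers as
# multiples of 2**-20, 3.0 * 2.0 stays 6.0 in the same representation:
#   _mul2012(3 << 20, 2 << 20) == 6 << 20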
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
Attributes:
checksum: for verifying against dvi file
design_size: design size of the font (in what units?)
width[i]: width of character \#i, needs to be scaled
by the factor specified in the dvi file
(this is a dict because indexing may not start from 0)
height[i], depth[i]: height and depth of character \#i
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
file = open(filename, 'rb')
try:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack('!6H', header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack('!2I', header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
finally:
file.close()
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack('!%dI' % (len(x)/4), x)
for x in (widths, heights, depths) ]
        for i in range(ec-bc+1):  # char_info holds ec-bc+1 entries (chars bc..ec inclusive)
self.width[bc+i] = _fix2comp(widths[ord(char_info[4*i])])
self.height[bc+i] = _fix2comp(heights[ord(char_info[4*i+1]) >> 4])
self.depth[bc+i] = _fix2comp(depths[ord(char_info[4*i+1]) & 0xf])
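# Minimal usage sketch for Tfm (hedged: 'cmr10.tfm' is only an example of a
# commonly installed metric file; any .tfm that kpsewhich can locate works).
# The width/height/depth dicts hold unscaled fix_word values, later combined
# with the per-font scale via _mul2012 (see DviFont._width_of):
#   tfm = Tfm(find_tex_file('cmr10.tfm'))
#   w = tfm.width.get(ord('a'), 0)   # fix_word, still to be scaled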
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage: map = PsfontsMap('.../psfonts.map'); map['cmr10']
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts, while
the pdf-related files perhaps only avoid the "Base 14" pdf fonts.
But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
file = open(filename, 'rt')
try:
self._parse(file)
finally:
file.close()
def __getitem__(self, texname):
result = self._font[texname]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
texname, psname = words[:2]
effects, encoding, filename = [], None, None
for word in words[2:]:
if not word.startswith('<'):
effects.append(word)
else:
word = word.lstrip('<')
if word.startswith('['):
assert encoding is None
encoding = word[1:]
elif word.endswith('.enc'):
assert encoding is None
encoding = word
else:
assert filename is None
filename = word
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
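# Minimal usage sketch for PsfontsMap, mirroring the __main__ block at the end
# of this file (hedged: assumes kpsewhich finds pdftex.map and that it maps
# cmr10):
#   fontmap = PsfontsMap(find_tex_file('pdftex.map'))
#   entry = fontmap['cmr10']
#   entry.psname, entry.filename, entry.encoding, entry.effects
# Note that __getitem__ resolves relative file names through find_tex_file.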
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
file = open(filename, 'rt')
try:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + `self.encoding`, 'debug-annoying')
finally:
file.close()
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError, "Broken name in encoding file: " + w
return result
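# Minimal usage sketch for Encoding (hedged: 'foo.enc' is a placeholder for
# whatever encoding file a PsfontsMap entry points to); iteration yields the
# glyph names in slot order:
#   for name in Encoding(find_tex_file('foo.enc')):
#       print name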
def find_tex_file(filename, format=None):
"""
Call kpsewhich to find a file in the texmf tree.
If format is not None, it is used as the value for the --format option.
See the kpathsea documentation for more information.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result
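# Illustrative call of find_tex_file (the result is system dependent and is an
# empty string when kpsewhich cannot locate the file; the path shown is only an
# example):
#   find_tex_file('cmr10.tfm')   # e.g. '/usr/share/texmf/fonts/tfm/.../cmr10.tfm'
# The optional format argument is passed straight through as kpsewhich --format.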
def _read_nointr(pipe, bufsize=-1):
while True:
try:
return pipe.read(bufsize)
except OSError, e:
if e.errno == errno.EINTR:
continue
else:
raise
# With multiple text objects per figure (e.g. tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print '=== new page ==='
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print 'font', f.texname, 'scaled', f._scale/pow(2.0,20)
fPrev = f
print x,y,c, 32 <= c < 128 and chr(c) or '.', w
for x,y,w,h in page.boxes:
print x,y,'BOX',w,h
| agpl-3.0 |
theoryno3/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
timqian/sms-tools | lectures/8-Sound-transformations/plots-code/sineModelFreqScale-orchestra.py | 21 | 2666 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
import sineTransformations as SMT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
freqScaling = np.array([0, .8, 1, 1.2])
ytfreq = SMT.sineFreqScaling(tfreq, freqScaling)
y = SM.sineModelSynth(ytfreq, tmag, np.array([]), Ns, H, fs)
mY, pY = STFT.stftAnal(y, fs, w, N, H)
UF.wavwrite(y,fs, 'sineModelFreqScale-orchestra.wav')
maxplotfreq = 4000.0
plt.figure(1, figsize=(9.5, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (orchestra.wav)')
plt.subplot(4,1,2)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,3)
numFrames = int(ytfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = ytfreq*np.less(ytfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('freq-scaled sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('sineModelFreqScale-orchestra.png')
plt.show()
| agpl-3.0 |
ssh0/growing-string | triangular_lattice/diecutting/result_count_on_edge.py | 1 | 9360 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-12-16
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.cm as cm
import numpy as np
import set_data_path
class Visualizer(object):
def __init__(self, subjects):
self.data_path_list = set_data_path.data_path
if len(subjects) != 0:
for subject in subjects:
getattr(self, 'result_' + subject)()
def load_data(self, _path):
data = np.load(_path)
beta = data['beta']
try:
size_dist_ave = data['size_dist_ave']
if len(size_dist_ave) == 0:
raise KeyError
return self.load_data_averaged(_path)
except KeyError:
pass
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
size_dist = data['size_dist']
N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float) / num_of_strings
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
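    # Note on the formula used above (added for clarity): on a triangular
    # lattice a hexagonal cutting region of "radius" L contains
    # N_all = 3*L*(L+1) + 1 sites (the centered hexagonal numbers), e.g.
    # L=0 -> 1, L=1 -> 7, L=2 -> 19.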
def load_data_averaged(self, _path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
# size_dist = data['size_dist']
size_dist_ave = data['size_dist_ave']
N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist_ave:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float)
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_all = N_all
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n_all = 6 * Ls[1:]
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def result_N(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Occupied points in the cutting region' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N$')
plt.show()
def result_N_minus_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N_minus_rate[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('The rate of unoccupied sites among all N' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N_{-1} / N_{\mathrm{all}}$')
plt.show()
def result_n0(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n0, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of the sites that are the only member of \
a subcluster on the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{0}$')
plt.show()
def result_n1(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n1, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of the sites that are connected to an \
existing subcluster on the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{1}$')
plt.show()
def result_n2(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n2, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of the sites on the cutting edges that \
are connected to two neighbors.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{2}$')
plt.show()
def result_n_minus(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n_minus, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
        ax.set_title('Averaged number of the sites that are not occupied on \
the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{-1}$')
plt.show()
def result_S(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
        ax.set_title('Averaged number of the subclusters in the cut region.'
+ ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
def result_S_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
# ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
# ax.plot(self.Ls[1:], self.S[1:] / self.n_all, '.',
ax.plot(self.Ls[1:], self.S[1:] / self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
        ax.set_title('Averaged number of the subclusters in the cut region'
+ ' (normalized)'
+ ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
if __name__ == '__main__':
# subject: 'N', 'N_minus_rate', 'n0', 'n1', 'n2', 'n_minus', 'S'
main = Visualizer(
[
# 'N',
# 'N_minus_rate',
# 'n0',
# 'n1',
# 'n2',
# 'n_minus',
'S',
# 'S_rate'
]
)
| mit |
zhester/hzpy | examples/parseriff.py | 1 | 2368 | #!/usr/bin/env python
"""
Example RIFF (WAV contents) Data Parser
Sample data is written to a CSV file for analysis.
If matplotlib and numpy are available, signal plots (DFTs) are generated.
"""
import math
import os
import struct
import wave
try:
import matplotlib.pyplot as plot
import numpy
import numpy.fft as fft
except ImportError:
numeric_packages = False
else:
numeric_packages = True
#=============================================================================
def frame2mag( frame ):
( i, q ) = struct.unpack( '<BB', frame )
return math.sqrt( ( i ** 2 ) + ( q ** 2 ) )
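# Illustrative check of frame2mag (made-up 2-byte frame): the two unsigned
# bytes are read as I and Q components, so '\x03\x04' gives sqrt(3**2 + 4**2):
#   frame2mag('\x03\x04') == 5.0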
#=============================================================================
def main( argv ):
""" Script execution entry point """
# check usage
if len( argv ) < 2:
print 'You must specify at least an input file.'
return 0
# start and length
start = 0
length = 1024
if len( argv ) > 2:
start = int( argv[ 2 ] )
if len( argv ) > 3:
length = int( argv[ 3 ] )
# open file using wave module
wfile = wave.open( argv[ 1 ], 'rb' )
# print file info
print 'Channels: %d\nSample width: %d\nFrame rate: %d\nFrames: %d' % (
wfile.getnchannels(),
wfile.getsampwidth(),
wfile.getframerate(),
wfile.getnframes()
)
# check for starting offset
if start > 0:
junk = wfile.readframes( start )
# read frames
frames = wfile.readframes( length )
samples = []
for i in range( length ):
index = i * 2
samples.append( frame2mag( frames[ index : ( index + 2 ) ] ) )
# close wave file
wfile.close()
# plot
if numeric_packages == True:
fft_data = fft.fft( samples[ : 1024 ] )
mags = numpy.absolute( fft_data )
mags_db = [ 20 * numpy.log10( mag ) for mag in mags ]
plot.figure( 1 )
plot.plot( samples )
plot.figure( 2 )
plot.plot( mags_db )
plot.show()
# output
oname = argv[ 1 ].replace( '.wav', '.csv' )
ofile = open( oname, 'wb' )
for sample in samples:
ofile.write( '%d\n' % sample )
ofile.close()
# Return success.
return 0
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
| bsd-2-clause |
ThomasSweijen/TPF | doc/sphinx/conf.py | 1 | 28022 | # -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleaned up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have 'latex', 'html' or 'epub' on the command line (hack for reference styles)")
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
def yadesrc_role(role,rawtext,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
"""Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute tergets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
if 0:
refnode=addnodes.pending_xref(rawtext,reftype=role,refexplicit=explicitText,reftarget=target)
#refnode.line=lineno
#refnode+=nodes.literal(rawtext,text,classes=['ref',role])
return [refnode],[]
#ret.rawtext,reftype=role,
else:
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
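# Illustrative mapping done by mkYrefNode for the HTML writer (example target
# picked from moduleMap above, not an exhaustive specification): the target
# 'yade._packSpheres.SpherePack' is documented from yade.pack, so the link
# points to 'yade.pack.html#yade._packSpheres.SpherePack' and the implicit
# link text becomes 'pack.SpherePack'.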
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
#escape crash on non iterable __doc__ in some qt object
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
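# Illustrative behaviour of replaceLaTeX (example string made up here): an
# unescaped $...$ becomes an inline :math: role and an escaped \$ is reduced
# to a plain dollar sign:
#   replaceLaTeX(r'mass $m$ costs \$5')
#   # -> 'mass \ :math:`m`\  costs $5'   (note the escaped-space padding)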
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(':yattrflags:``','')
#lines[i]=re.sub(':``',':` `',lines[i])
# remove signature of boost::python function docstring, which is the first line of the docstring
if isBoostFunc(what,obj):
l2=boostFuncSignature(name,obj)[1]
# we must replace lines one by one (in-place) :-|
# knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
elif isBoostMethod(what,obj):
l2=boostFuncSignature(name,obj)[1]
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
# LaTeX: replace $...$ by :math:`...`
# must be done after calling boostFuncSignature which uses original docstring
for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
"""Scan docstring of obj, returning tuple of properly formatted boost python signature
(first line of the docstring) and the rest of docstring (as list of lines).
The rest of docstring is stripped of 4 leading spaces which are automatically
added by boost.
removeSelf will attempt to remove the first argument from the signature.
"""
doc=obj.__doc__
if doc==None: # not a boost method
return None,None
nname=name.split('.')[-1]
docc=doc.split('\n')
if len(docc)<2: return None,docc
doc1=docc[1]
# functions with weird docstring, likely not documented by boost
if not re.match('^'+nname+r'(.*)->.*$',doc1):
return None,docc
if doc1.endswith(':'): doc1=doc1[:-1]
strippedDoc=doc.split('\n')[2:]
# check if all lines are padded
allLinesHave4LeadingSpaces=True
for l in strippedDoc:
if l.startswith(' '): continue
allLinesHave4LeadingSpaces=False; break
# remove the padding if so
if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
for i in range(len(strippedDoc)):
# fix signatures inside docstring (one function with multiple signatures)
strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
# inspect dosctring after mangling
if 'getViscoelasticFromSpheresInteraction' in name and False:
print name
print strippedDoc
print '======================'
for l in strippedDoc: print l
print '======================'
sig=doc1.split('(',1)[1]
if removeSelf:
# remove up to the first comma; if no comma present, then the method takes no arguments
# if [ precedes the comma, add it to the result (ugly!)
try:
ss=sig.split(',',1)
if ss[0].endswith('['): sig='['+ss[1]
else: sig=ss[1]
except IndexError:
# grab the return value
try:
sig=') -> '+sig.split('->')[-1]
#if 'Serializable' in name: print 1000*'#',name
except IndexError:
sig=')'
return '('+sig,strippedDoc
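# Rough illustration of boostFuncSignature (the docstring below is a made-up
# sample of what boost::python generates, not taken from Yade): for
#   obj.__doc__ == '\nf( (Scene)arg1, (float)x) -> float :\n    body'
# the call boostFuncSignature('module.f', obj) returns roughly
# ('( (Scene)arg1, (float)x) -> float ', ['body']), and removeSelf=True drops
# the leading (Scene)arg1 from the returned signature.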
def fixSignature(app, what, name, obj, options, signature, return_annotation):
#print what,name,obj,signature#,dir(obj)
if what=='attribute':
doc=unicode(obj.__doc__)
ret=''
m=re.match('.*:ydefault:`(.*?)`.*',doc)
if m:
typ=''
#try:
# clss='.'.join(name.split('.')[:-1])
# instance=eval(clss+'()')
# typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
# if typ=='; NoneType': typ=''
#except TypeError: ##no registered converted
# typ=''
dfl=m.group(1)
m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
if dfl!='': ret+=' (='+dfl+'%s)'%typ
else: ret+=' (=uninitalized%s)'%typ
#m=re.match('.*\[(.{,8})\].*',doc)
#m=re.match('.*:yunit:`(.?*)`.*',doc)
#if m:
# units=m.group(1)
# print '@@@@@@@@@@@@@@@@@@@@@',name,units
# ret+=' ['+units+']'
return ret,None
elif what=='class':
ret=[]
if len(obj.__bases__)>0:
base=obj.__bases__[0]
while base.__module__!='Boost.Python':
ret+=[base.__name__]
if len(base.__bases__)>0: base=base.__bases__[0]
else: break
if len(ret):
return ' (inherits '+u' → '.join(ret)+')',None
else: return None,None
elif isBoostFunc(what,obj):
sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
return sig,None
elif isBoostMethod(what,obj):
sig=boostFuncSignature(name,obj,removeSelf=True)[0]
return sig,None
#else: print what,name,obj.__repr__()
#return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
if not m:
print 100*'@'+' Static attribute %s not matched'%attr
attrnode+=addnodes.desc_name(attr,attr)
klass,name,default=m.groups()
#attrnode+=addnodes.desc_type('static','static')
attrnode+=addnodes.desc_name(name,name)
plist=addnodes.desc_parameterlist()
if default=='': default='unspecified'
plist+=addnodes.desc_parameter('='+default,'='+default)
attrnode+=plist
attrnode+=addnodes.desc_annotation(' [static]',' [static]')
return klass+'.'+name
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CppLexer
lexers['cpp'] = CppLexer(tabsize=3)
lexers['c++'] = CppLexer(tabsize=3)
from pygments.lexers.agile import PythonLexer
lexers['python'] = PythonLexer(tabsize=3)
app.connect('source-read',fixSrc)
app.connect('autodoc-skip-member',customExclude)
app.connect('autodoc-process-signature',fixSignature)
app.connect('autodoc-process-docstring',fixDocstring)
app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
if 1:
if yade.runtime.ipython_version<12:
import ipython_directive as id
else:
if 12<=yade.runtime.ipython_version<13:
import ipython_directive012 as id
elif 13<=yade.runtime.ipython_version<200:
import ipython_directive013 as id
else:
import ipython_directive200 as id
#The next four lines are for compatibility with IPython 0.13.1
ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
ipython_promptin ='Yade [%d]:'
ipython_promptout=' -> [%d]: '
ipython_cont_spaces=' '
#For IPython <=0.12, the following lines are used
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
id.fmtcont=' .\D.: '
id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
if yade.runtime.ipython_version<12:
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
elif 13<=yade.runtime.ipython_version<200:
extensions.append('ipython_directive013')
else:
extensions.append('ipython_directive200')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Yade Documentation}}
\hypersetup{pdfauthor={V. Smilauer, E. Catalano, B. Chareyre, S. Dorofeenko, J. Duriez, A. Gladky, J. Kozicki, C. Modenese, L. Scholtes, L. Sibille, J. Stransky, K. Thoeni}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = yade.config.version
# The full version, including alpha/beta/rc tags.
release = yade.config.revision
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Yade Documentation}\\
\vspace{5 mm}
\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, Jerome Duriez, Anton Gladky, Janek Kozicki, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Klaus Thoeni
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
% Bottom of the page
\textit{\Large Release '''\
+yade.config.revision\
+r''', \today}
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\LARGE Authors}\\
\\
\text{\sffamily\bfseries\Large V\'{a}clav \v{S}milauer}\\
\text{\sffamily\Large Freelance consultant (http://woodem.eu)}\\
\\
\text{\sffamily\bfseries\Large Emanuele Catalano}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Bruno Chareyre}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Sergei Dorofeenko}\\
\text{\sffamily\Large IPCP RAS, Chernogolovka}\\
\\
\text{\sffamily\bfseries\Large Jerome Duriez}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Anton Gladky}\\
\text{\sffamily\Large TU Bergakademie Freiberg}\\
\\
\text{\sffamily\bfseries\Large Janek Kozicki}\\
\text{\sffamily\Large Gdansk University of Technology - lab. 3SR Grenoble University }\\
\\
\text{\sffamily\bfseries\Large Chiara Modenese}\\
\text{\sffamily\Large University of Oxford}\\
\\
\text{\sffamily\bfseries\Large Luc Scholt\`{e}s}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Luc Sibille}\\
\text{\sffamily\Large University of Nantes, lab. GeM}\\
\\
\text{\sffamily\bfseries\Large Jan Str\'{a}nsk\'{y}}\\
\text{\sffamily\Large CVUT Prague}\\
\\
\text{\sffamily\bfseries\Large Klaus Thoeni}
\text{\sffamily\Large The University of Newcastle (Australia)}\\
\text{\sffamily\bfseries\large Citing this document}\\
In order to let users cite Yade consistently in publications, we provide a list of bibliographic references for the different parts of the documentation. This way of acknowledging Yade is also a way to make developments and documentation of Yade more attractive for researchers, who are evaluated on the basis of citations of their work by others. We therefore kindly ask users to cite Yade as accurately as possible in their papers, as explained in http://yade-dem/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree', 'Yade.tex', u'Yade Documentation',
u'Václav Šmilauer', 'manual'),
('index-toctree_manuals', 'YadeManuals.tex', u'Yade Tutorial and Manuals',
u'Václav Šmilauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-2.0 |
xray/xray | xarray/core/options.py | 1 | 5201 | import warnings
DISPLAY_WIDTH = "display_width"
ARITHMETIC_JOIN = "arithmetic_join"
ENABLE_CFTIMEINDEX = "enable_cftimeindex"
FILE_CACHE_MAXSIZE = "file_cache_maxsize"
WARN_FOR_UNCLOSED_FILES = "warn_for_unclosed_files"
CMAP_SEQUENTIAL = "cmap_sequential"
CMAP_DIVERGENT = "cmap_divergent"
KEEP_ATTRS = "keep_attrs"
DISPLAY_STYLE = "display_style"
OPTIONS = {
DISPLAY_WIDTH: 80,
ARITHMETIC_JOIN: "inner",
ENABLE_CFTIMEINDEX: True,
FILE_CACHE_MAXSIZE: 128,
WARN_FOR_UNCLOSED_FILES: False,
CMAP_SEQUENTIAL: "viridis",
CMAP_DIVERGENT: "RdBu_r",
KEEP_ATTRS: "default",
DISPLAY_STYLE: "html",
}
_JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"])
_DISPLAY_OPTIONS = frozenset(["text", "html"])
def _positive_integer(value):
return isinstance(value, int) and value > 0
_VALIDATORS = {
DISPLAY_WIDTH: _positive_integer,
ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,
ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),
FILE_CACHE_MAXSIZE: _positive_integer,
WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),
KEEP_ATTRS: lambda choice: choice in [True, False, "default"],
DISPLAY_STYLE: _DISPLAY_OPTIONS.__contains__,
}
def _set_file_cache_maxsize(value):
from ..backends.file_manager import FILE_CACHE
FILE_CACHE.maxsize = value
def _warn_on_setting_enable_cftimeindex(enable_cftimeindex):
warnings.warn(
"The enable_cftimeindex option is now a no-op "
"and will be removed in a future version of xarray.",
FutureWarning,
)
_SETTERS = {
FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,
ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,
}
def _get_keep_attrs(default):
global_choice = OPTIONS["keep_attrs"]
if global_choice == "default":
return default
elif global_choice in [True, False]:
return global_choice
else:
raise ValueError(
"The global option keep_attrs must be one of" " True, False or 'default'."
)
class set_options:
"""Set options for xarray in a controlled context.
Currently supported options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``file_cache_maxsize``: maximum number of open files to hold in xarray's
      global least-recently-used cache. This should be smaller than your
system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.
Default: 128.
- ``warn_for_unclosed_files``: whether or not to issue a warning when
unclosed files are deallocated (default False). This is mostly useful
for debugging.
- ``cmap_sequential``: colormap to use for nondivergent data plots.
Default: ``viridis``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``cmap_divergent``: colormap to use for divergent data plots.
Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``keep_attrs``: rule for whether to keep attributes on xarray
Datasets/dataarrays after operations. Either ``True`` to always keep
attrs, ``False`` to always discard them, or ``'default'`` to use original
logic that attrs should only be kept in unambiguous circumstances.
Default: ``'default'``.
- ``display_style``: display style to use in jupyter for xarray objects.
      Default: ``'html'``. Other options are ``'text'``.
You can use ``set_options`` either as a context manager:
>>> ds = xr.Dataset({"x": np.arange(1000)})
>>> with xr.set_options(display_width=40):
... print(ds)
<xarray.Dataset>
Dimensions: (x: 1000)
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 ...
Data variables:
*empty*
Or to set global options:
>>> xr.set_options(display_width=80)
"""
def __init__(self, **kwargs):
self.old = {}
for k, v in kwargs.items():
if k not in OPTIONS:
raise ValueError(
"argument name %r is not in the set of valid options %r"
% (k, set(OPTIONS))
)
if k in _VALIDATORS and not _VALIDATORS[k](v):
if k == ARITHMETIC_JOIN:
expected = f"Expected one of {_JOIN_OPTIONS!r}"
elif k == DISPLAY_STYLE:
expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
else:
expected = ""
raise ValueError(
f"option {k!r} given an invalid value: {v!r}. " + expected
)
self.old[k] = OPTIONS[k]
self._apply_update(kwargs)
def _apply_update(self, options_dict):
for k, v in options_dict.items():
if k in _SETTERS:
_SETTERS[k](v)
OPTIONS.update(options_dict)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
self._apply_update(self.old)
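
# A minimal usage sketch (not part of the xarray source): values set through
# set_options are restored when the context manager exits, because __exit__
# re-applies the saved ``self.old`` mapping.
if __name__ == "__main__":
    print(OPTIONS[DISPLAY_WIDTH])      # 80, the default
    with set_options(display_width=40):
        print(OPTIONS[DISPLAY_WIDTH])  # 40 inside the block
    print(OPTIONS[DISPLAY_WIDTH])      # back to 80 after exit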
| apache-2.0 |
cloud-fan/spark | python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py | 1 | 6682 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BinaryOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([b"1", b"2", b"3"])
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_add(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser + b"1", pser + b"1")
self.assert_eq(psser + psser, pser + pser)
self.assert_eq(psser + psser.astype("bytes"), pser + pser.astype("bytes"))
self.assertRaises(TypeError, lambda: psser + "x")
self.assertRaises(TypeError, lambda: psser + 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
self.assert_eq(self.psser + self.psser, self.pser + self.pser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser - psser)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assert_eq(b"1" + self.psser, b"1" + self.pser)
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = [b"1", b"2", b"3"]
pser = pd.Series(data)
psser = ps.Series(data)
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_astype(self):
pser = self.pser
psser = self.psser
self.assert_eq(pd.Series(["1", "2", "3"]), psser.astype(str))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=[b"2", b"3", b"1"])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_binary_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
jvkersch/hsmmlearn | docs/conf.py | 1 | 9948 | # -*- coding: utf-8 -*-
#
# hsmmlearn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 1 17:33:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Avoid using C libraries on RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['numpy', 'scipy', 'scipy.stats', 'matplotlib',
'matplotlib.pyplot', 'hsmmlearn.base']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hsmmlearn'
copyright = u'2016, Joris Vankerschaver'
author = u'Joris Vankerschaver'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'hsmmlearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hsmmlearn.tex', u'hsmmlearn Documentation',
u'Joris Vankerschaver', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
author, 'hsmmlearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
panmari/tensorflow | tensorflow/examples/skflow/boston.py | 1 | 1485 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, cross_validation, metrics
from sklearn import preprocessing
from tensorflow.contrib import skflow
# Load dataset
boston = datasets.load_boston()
X, y = boston.data, boston.target
# Split dataset into train / test
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2, random_state=42)
# scale data (training set) to 0 mean and unit Std. dev
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10],
steps=5000, learning_rate=0.1, batch_size=1)
# Fit
regressor.fit(X_train, y_train)
# Predict and score
score = metrics.mean_squared_error(regressor.predict(scaler.transform(X_test)), y_test)  # reuse the scaler fitted on the training data
print('MSE: {0:f}'.format(score))
| apache-2.0 |
jmrozanec/white-bkg-classification | scripts/preprocessing.py | 1 | 1441 | #https://github.com/tflearn/tflearn/issues/180
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
import skimage
from skimage import data
from skimage import filters
import os
from skimage import io
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
reds="../images/pictures/red/"
greens="../images/pictures/green/"
redshist="../images/histograms/red/"
greenshist="../images/histograms/green/"
directory=reds
histdirectory=redshist
for filename in os.listdir(directory):
if filename.endswith(".jpg"):
img = io.imread(os.path.join(directory, filename))
hist, bin_edges = np.histogram(img, bins=255)
bin_centers = 0.5*(bin_edges[:-1] + bin_edges[1:])
binary_img = img > 0.8
plt.figure(figsize=(1,1))
fig, ax = plt.subplots(nrows=1, ncols=1) #http://stackoverflow.com/questions/9622163/save-plot-to-image-file-instead-of-displaying-it-using-matplotlib-so-it-can-be
plt.plot(bin_centers, hist, lw=2)
fig.savefig(os.path.join(histdirectory, filename), bbox_inches='tight')
plt.close()
else:
continue
| apache-2.0 |
dimonaks/siman | siman/functions.py | 1 | 29689 |
from __future__ import division, unicode_literals, absolute_import
import os, tempfile, copy, math, itertools, sys, traceback, datetime  # traceback and datetime are used by calc_ac() for history logging
import numpy as np
from operator import itemgetter
from itertools import product
try:
import scipy
except:
print('functions.py: no scipy, smoother() will not work()')
from siman import header
from siman.header import print_and_log, printlog, runBash, eV_A_to_J_m
from siman.small_functions import is_list_like, is_string_like, gunzip_file, makedir, grep_file, setting_sshpass
def unique_elements(seq, idfun=None):
# return only unique_elements order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def smoother(x, n, mul = 1, align = 1):
"""
    mul - additionally multiplies the values
    align - if set, the first point of the smoothed curve is returned to zero
    n - smoothing parameter:
        if algo == 'gaus' it is sigma; use something like 0.8
        if algo == 'my', n of 10-15 is good
"""
algo = 'gaus'
# algo = 'my'
if algo == 'my':
x_smooth = []
L = len(x)
store = np.zeros((n,1),float)
for u in range(L-n):
for v in range(n):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
for u in range(L-n,L):
for v in range(L-u-1):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
elif algo == 'gaus':
x_smooth =x
# x_smooth = scipy.ndimage.filters.median_filter(x,size =4)
# print('sigma is ', n)
x_smooth = scipy.ndimage.filters.gaussian_filter1d(x_smooth, n, order =0)
# x_smooth = scipy.ndimage.interpolation.spline_filter1d(x, 4)
else:
x_smooth = x
if align:
# print(x_smooth[0])
x_smooth[0] = 0
# sys.exit()
return np.asarray(x_smooth)
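
# A minimal usage sketch (not part of the original siman API): smooth a noisy
# signal with the Gaussian algorithm, using sigma ~ 0.8 as suggested in the docstring.
def _smoother_example():
    t = np.linspace(0, 2 * np.pi, 200)
    noisy = np.sin(t) + 0.1 * np.random.randn(200)
    return smoother(noisy, 0.8, align=0)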
def run_on_server(command, addr = None):
printlog('Running', command, 'on server ...')
command = command.replace('\\', '/') # make sure is POSIX
# sys.exit()
# print(header.sshpass)
# sys.exit()
if addr is None:
addr = header.cluster_address
if header.ssh_object:
# printlog('Using paramiko ...', imp = 'y')
# if 'ne' in header.warnings:
# sys.exit()
out = header.ssh_object.run(command, noerror = True, printout = 'ne' in header.warnings)
elif header.sshpass and header.sshpass == 'proxy':
com = 'ssh -tt sdv sshpass -f '+ header.path2pass +' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
out = out.split('Connection to')[0] # remove last message Connection to ipaddress closed
# sys.exit()
elif header.sshpass:
com = 'sshpass -f '+header.path2pass+' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# sys.exit()
else:
bash_comm = 'ssh '+addr+' "'+command+'"'
# print(bash_comm)
# sys.exit()
out = runBash(bash_comm)
out = out.split('#')[-1].strip()
printlog(out)
# print(out)
# sys.exit()
return out
def push_to_server(files = None, to = None, addr = None):
"""
if header.ssh_object then use paramiko
to (str) - path to remote folder !
"""
if not is_list_like(files):
files = [files]
to = to.replace('\\', '/') # make sure is POSIX
files_str = ' '.join(np.array(files ))
command = ' mkdir -p {:}'.format( to )
# print('asfsadfdsf', to)
printlog('push_to_server():', command, run_on_server(command, addr))
# sys.exit()
printlog('push_to_server(): uploading files ', files, 'to', addr, to)
if header.ssh_object:
for file in files:
# print(file, to)
header.ssh_object.put(file, to+'/'+os.path.basename(file) )
out = ''
elif header.sshpass and header.sshpass == 'proxy':
com = 'tar cf - '+ files_str + ' | ssh sdv "sshpass -f ~/.ssh/p ssh '+addr+' \\"cd '+header.cluster_home+' && tar xvf -\\"" '
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
# sys.exit()
elif header.sshpass:
# if '@' not in addr:
# printlog('Error! Please provide address in the form user@address')
# l = addr.split('@')
# print(l)
# user = l[0]
# ad = l[1]
# com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
# print(com)
# sys.exit()
out = runBash(com)
else:
out = runBash('rsync -uaz '+files_str+ ' '+addr+':'+to)
printlog(out)
return out
def file_exists_on_server(file, addr):
file = file.replace('\\', '/') # make sure is POSIX
printlog('Checking existence of file', file, 'on server', addr )
exist = run_on_server(' ls '+file, addr)
# if header.ssh_object:
# exist = header.ssh_object.fexists(file)
# else:
# exist = runBash('ssh '+addr+' ls '+file)
if 'No such file' in exist:
exist = ''
else:
exist = 'file exists'
if exist:
res = True
else:
res = False
printlog('File exist? ', res)
return res
def get_from_server(files = None, to = None, to_file = None, addr = None, trygz = True):
"""
Download files using either paramiko (higher priority) or rsync;
For paramiko header.ssh_object should be defined
files (list of str) - files on cluster to download
to (str) - path to local folder !
to_file (str) - path to local file (if name should be changed); in this case len(files) should be 1
The gz file is also checked
RETURN
result of download
TODO:
        currently a new connection is opened for each file;
        copy them over a single connection instead
"""
# print(addr)
# sys.exit()
def download(file, to_file):
# print(header.sshpass)
if header.ssh_object:
exist = file_exists_on_server(file, addr)
# try:
if exist:
printlog('Using paramiko: ssh_object.get(): from to ', file, to_file)
header.ssh_object.get(file, to_file )
out = ''
# except FileNotFoundError:
else:
out = 'error, file not found'
elif header.sshpass and header.sshpass == 'proxy':
# com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar zcf - '+ file +'\\"" | tar zxf - '+to_file # does not work?
com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar cf - '+ file +'\\"" > '+to_file
# print('sshpass',com)
# sys.exit()
out = runBash(com)
elif header.sshpass:
#com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
out = runBash(com)
# print(addr)
# sys.exit()
else:
# print(addr,file,to_file)
out = runBash('rsync -uaz '+addr+':'+file+ ' '+to_file)
if 'error' in out:
res = out
else:
res = 'OK'
out = ''
printlog('Download result is ', res)
return out
if '*' in files:
printlog('get_from_server(): get by template')
files = run_on_server('ls '+files, addr).splitlines()
# print(files)
# sys.exit()
printlog('get_from_server(): I download', files)
elif not is_list_like(files):
files = [files]
files = [file.replace('\\', '/') for file in files] #make sure the path is POSIX
files_str = ', '.join(np.array(files ))
printlog('Trying to download', files_str, 'from server', imp = 'n')
for file in files:
if not to and not to_file: #use temporary file
with tempfile.NamedTemporaryFile() as f:
to_file_l = f.name #system independent filename
elif not to_file: #obtain filename
to_file_l = os.path.join(to, os.path.basename(file) )
else:
to_file_l = to_file
makedir(to_file_l)
out = download(file, to_file_l)
if out and trygz:
printlog('File', file, 'does not exist, trying gz', imp = 'n')
# run_on_server
files = run_on_server(' ls '+file+'*', addr)
file = files.split()[-1]
# print(file)
nz = file.count('gz')
ext = '.gz'*nz
# file+='.gz'
to_file_l+=ext
if file:
out = download(file, to_file_l)
printlog(' gz found with multiplicity', ext, imp = 'n')
for i in range(nz):
printlog('unzipping', to_file_l)
gunzip_file(to_file_l)
to_file_l = to_file_l[:-3]
else:
printlog(' No gz either!', imp = 'n')
# if '5247' in file:
# sys.exit()
return out
def salary_inflation():
"""Calculate salary growth in Russia taking into account inflation"""
inflation2000_2014 = [
5.34,
6.45,
6.58,
6.10,
8.78,
8.80,
13.28,
11.87,
9.00 ,
10.91,
11.74,
11.99,
15.06,
18.8,
20.1]
init_salary = 1500 # in jan 2000; other sources 2000 - very important
for i, l in enumerate( reversed(inflation2000_2014) ):
init_salary = (1+l/100)*init_salary
print( init_salary, i+2000)
salary2014 = 30000
increase = salary2014/init_salary
print( increase)
# salary_inflation()
def element_name_inv(el):
el_dict = header.el_dict
nu_dict = header.nu_dict
# print type(el), el, type(str('sdf') )
if is_string_like(el):
try:
elinv = el_dict[el]
except:
print_and_log("Error! Unknown element: " +str(el))
raise RuntimeError
else:
el = int(el)
try:
elinv = nu_dict[el]
except:
print_and_log("Error! Unknown element: "+str(el))
raise RuntimeError
return elinv # inversed notion of element
invert = element_name_inv
def return_atoms_to_cell(st):
st = st.return_atoms_to_cell()
return st
def calc_ac(a1, c1, a2, c2, a_b = 0.1, c_b = 0.1, type = "two_atoms"):
"""
Calculate values of hexagonal lattice parameters for cell with two different atoms.
The used assumption is:
    1. The provided lattice constants are for cells large enough that the excess volume (dV) of the impurity does not depend on the cell size.
    2. The two atoms do not interact with each other, which allows one to use dV(CO) = dV(C) + dV(O)
Two regimes:
two_atoms - calculate cell sizes if additional atom was added
double_cell - if cell was doubled; only first cell and second_cell are needed
Input:
a1, c1 - lattice constants of cell with first impurity atom (first cell)
a2, c2 - lattice constants of cell with second impurity atom (second cell)
    a_b, c_b - lattice constants of the cell with the pure hexagonal metal
Output:
a, c - lattice constants of cell with two atoms
"""
hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
if hstring != header.history[-1]: header.history.append( hstring )
A = (a1**2 * c1) + (a2**2 * c2) - (a_b**2 * c_b)
B = 0.5 * (c1/a1 + c2/a2)
C = ( (a1**2 * c1) + (a2**2 * c2) ) * 0.5 #sum of cell volumes divided by 2 since during the construction of new cell we will use multiplication by 2
# print "A,B=",A,B
a = (A/B)**(1./3)
c = a * B
a = round(a,5)
c = round(c,5)
print_and_log( "a, c, c/a for cell with pure hcp ", a_b, c_b, round(c_b/a_b,4), imp ='y' )
print_and_log( "a, c, c/a for cell with first atom ", a1, c1, round(c1/a1,4), imp ='y' )
print_and_log( "a, c, c/a for cell with second atom ", a2, c2, round(c2/a2,4), imp ='y' )
#for double cell
a3 = (C/B)**(1./3)
c3 = a3 * B
a3 = round(a3,5)
c3 = round(c3,5)
if type == "two_atoms":
print_and_log( "a, c, c/a for cell with two atoms ", a, c, round(c/a,4), "# the same cell but with two atoms\n", imp ='y')
elif type == "double_cell":
print_and_log( "a, c, c/a for new cell ", a3, c3, round(c3/a3,4), "# for cell with V = V(first_cell) + V(second cell), but only for the case if V(second cell) == V(first_cell)", imp ='y')
return a, c
def read_charge_den_vasp():
"""
Read CHG vasp file and return ChargeDen object
"""
class ChargeDen():
"""docstring for ChargeDen"""
def __init__(self, ):
# self.arg = arg
pass
def rotation_matrix(axis,theta):
axis = axis/math.sqrt(np.dot(axis,axis))
a = math.cos(theta/2)
b,c,d = -axis*math.sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def rotate():
v = np.array([3,5,0])
axis = np.array([4,4,1])
theta = 1.2
print(np.dot(rotation_matrix(axis,theta),v))
# [ 2.74911638 4.77180932 1.91629719]
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
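
# A minimal check (not part of the original file): the returned matrix rotates
# vec1 onto the direction of vec2; the two test vectors below are arbitrary.
def _rotation_matrix_from_vectors_example():
    v1 = np.array([1.0, 0.0, 0.0])
    v2 = np.array([0.0, 1.0, 1.0])
    R = rotation_matrix_from_vectors(v1, v2)
    rotated = R.dot(v1)
    # both normalized vectors should coincide
    return rotated / np.linalg.norm(rotated), v2 / np.linalg.norm(v2)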
def plot_charge_den():
"""Test function; Was not used"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
# print X
# print Y
# print Z
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
# cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlabel('X')
ax.set_xlim(-40, 40)
ax.set_ylabel('Y')
ax.set_ylim(-40, 40)
ax.set_zlabel('Z')
ax.set_zlim(-100, 100)
plt.show()
return
def plot_interaction(calclist, calc):
"""
For calculation of interaction parameter alpha;
Take in mind that this parameter is obtained under aproximation of redular solution
"""
e_seg = []
dX = []
for id in calclist:
Xgb = calc[id].Xgb
X = calc[id].X
dX.append(Xgb/1 - X)
e_seg.append(calc[id].e_seg)
# print calc[id].e_seg
# print calc[id].X
#print dX
coeffs1 = np.polyfit(dX, e_seg, 1)
fit_func1 = np.poly1d(coeffs1)
print( "list of seg energies: ", e_seg )
print( "list of dX : ", dX )
print( "Fitting using linear function:" )
print( fit_func1 )
print( "E_seg0 = {0:0.0f} meV, standart enthalpy of segregation".format(fit_func1[0]) )
print( "alpha = {0:0.0f} meV, interaction coefficient".format(-fit_func1[1]/2) )
return
def calculate_voronoi(self, state = 'end'):
# By default two quantities per atom are calculated by this compute.
# The first is the volume of the Voronoi cell around each atom.
# Any point in an atom's Voronoi cell is closer to that atom than any other.
# The second is the number of faces of the Voronoi cell, which
# is also the number of nearest neighbors of the atom in the middle of the cell.
# state - init or end; if init then saved in self.init.vorovol; if end than saved in self.vorovol
write_lammps(self, state, filepath = 'voronoi_analysis/structure.lammps') #write structure for lammps
runBash("rm voronoi_analysis/dump.voro; /home/aksenov/installed/lammps-1Feb14/src/lmp_serial < voronoi_analysis/voronoi.in > voronoi_analysis/log")
if state == 'end':
self.vorovol = []
self.vorofaces = []
vorovol = self.vorovol
vorofaces = self.vorofaces
elif state == 'init':
self.init.vorovol = []
self.init.vorofaces = []
vorovol = self.init.vorovol
vorofaces = self.init.vorofaces
vsum=0
wlist = []
with open('voronoi_analysis/dump.voro','r') as volfile: #analyze dump.voro
for line in volfile:
if 'ITEM: ATOMS ' in line:
break
for line in volfile:
ll = line.split()
if int(ll[1]) > 1:
wlist.append( [ll[0], ll[5], ll[6], ll[2]] )
# print 'Volume of atom ',ll[0],'is', ll[5]
vsum= vsum+float(ll[5])
print_and_log( 'Check total volume ', vsum, self.end.vol)
wlist.sort(key = itemgetter(0)) #sort according to the position of atoms
print_and_log( "atom #, voronoi vol, voronoi faces, x coordinate: ", )
print_and_log( wlist)
for w in wlist:
vorovol.append(float(w[1]))
vorofaces.append(int(w[2]))
# print 'Voro vol ',self.end.vorovol
# print 'Voro faces',self.end.vorofaces
# print len(wlist)
if hasattr(self, 'vorovol'):
voro = ''
if len(vorovol) == 2: #C and O
voro = " {0:5.2f} & {1:2d} & {2:5.2f} & {3:2d} ".format(vorovol[0], vorofaces[0], vorovol[1], vorofaces[1] ).center(25)
else:
voro = " {0:5.2f} & {1:2d} ".format(vorovol[0], vorofaces[0] ).center(25)
voro+='&'
else:
voro = ""
print_and_log( "Voronoi volume = ", voro, imp = 'y')
return voro
def log_history(hstring):
try:
if hstring != header.history[-1]: header.history.append( hstring )
except:
header.history.append( hstring )
return
def gb_energy_volume(gb,bulk):
if (gb.end.rprimd[1] != bulk.end.rprimd[1]).any() or (gb.end.rprimd[2] != bulk.end.rprimd[2]).any():
print_and_log("Warning! You are trying to calculate gb_energy from cells with different lateral sizes:"+str(gb.end.rprimd)+" "+str(bulk.end.rprimd)+"\n")
#print bulk.vol
V_1at = bulk.vol / bulk.natom #* to_ang**3
E_1at = bulk.energy_sigma0 / bulk.natom
A = np.linalg.norm( np.cross(gb.end.rprimd[1], gb.end.rprimd[2]) ) #surface area of gb
#print A
gb.v_gb = ( gb.vol - V_1at * gb.natom) / A / 2. * 1000
gb.e_gb = ( gb.energy_sigma0 - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.e_gb_init = ( gb.list_e_sigma0[0] - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.bulk_extpress = bulk.extpress
#print "Calc %s; e_gb_init = %.3f J/m^2; e_gb = %.3f J/m; v_gb = %.3f angstrom "%(gb.name, gb.e_gb_init, gb.e_gb, gb.v_gb )
outst = "%15s&%7.0f&%7.0f"%(gb.name, gb.e_gb, gb.v_gb)
return outst
def headers():
j = (7,12,14,7,8,9,9,5,5,20,5,20,8,12,20,8,5,8,8)
d="&"
header_for_bands= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"nband"+d+"Added, \%"+"\\\\"
header_for_ecut= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Ecut,eV"+"\\\\"
header_for_npar= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"NPAR".center(j[16])+d+"LPLANE".center(j[17])+"\\\\"
header_for_kpoints= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"k-spacings".center(j[9])+d+"nkpt".center(j[10])+"\\\\"
header_for_tsmear= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"tsmear, meV".center(j[13])+d+"Smearing error, meV/atom".center(j[14])+"\\\\"
header_for_stress= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Stress, intr u.*1000".center(j[11])+d+"Pressure, MPa".center(j[12])
#print "\\hline"
return header_for_kpoints
def read_vectors(token, number_of_vectors, list_of_words, type_func = None, lists = False):
"""Returns the list of numpy vectors for the last match"""
    # lists - return a list of lists instead of a list of vectors
if type_func is None:
type_func = lambda a : float(a)
number_of_matches = list_of_words.count( token )
if number_of_matches == 0:
#print_and_log("Warning token '"+token+"' was not found! return empty\n")
return [None]
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
#print list_of_words[index]
list_of_vectors = []
list_of_lists = []
vector = np.zeros((3))
for i in range(number_of_vectors):
vector[0] = type_func(list_of_words[index + 1])
vector[1] = type_func(list_of_words[index + 2])
vector[2] = type_func(list_of_words[index + 3])
list3 = []
for j in 1,2,3:
list3.append(type_func(list_of_words[index + j]) )
index+=3
list_of_vectors.append(vector.copy())
list_of_lists.append(list3)
if lists:
out = list_of_lists
else:
out = list_of_vectors
return out
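
# A minimal usage sketch (the token 'rprim' and the numbers are hypothetical):
# read three vectors that follow a token in a whitespace-tokenized file.
def _read_vectors_example():
    words = 'rprim 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0'.split()
    return read_vectors('rprim', 3, words)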
def read_string(token, length, string):
sh = len(token)+1
    i = string.find(token)
    # print('length', i, i+length)
    # sys.exit()
    if i == -1:  # token not found
        return ''
    else:
        i += sh
        return string[i:i+length]
def read_list(token, number_of_elements, ttype, list_of_words):
"""Input is token to find, number of elements to read, type of elements and list of words,
where to search
Returns the list of elements for the last match"""
number_of_matches = list_of_words.count( token )
#if number_of_elements == 0: raise RuntimeError
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
if number_of_matches == 0 or number_of_elements == 0:
#print_and_log("Warning token '"+token+"' was not found or asked number of elements is zero! set to [None]\n")
#if ttype == str:
# return ['']*number_of_elements
#else:
# return [0]*number_of_elements
return [None]
try:
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
except ValueError:
print_and_log("Warning!, token "+token+" was not found. I return [None]!\n")
return [None]
index+=1 #the position of token value
list_of_elements = []
#define function dependig on type:
if ttype == int :
def convert(a):
return int(a)
elif ttype == float:
def convert(a):
# print a
return float(a)
elif ttype == str :
def convert(a):
return str(a)
#print list_of_words[index], type(list_of_words[index])
if list_of_words[index] == "None" :
def convert(a):
return [None]
#Make convertion
for i in range(number_of_elements):
if 'None' in list_of_words[index]:
list_of_elements.append(None)
else:
list_of_elements.append( convert( list_of_words[index] ) )
index+=1
return list_of_elements
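
# A minimal usage sketch (the token 'acell' and the values are hypothetical):
# read three floats that follow a token in a whitespace-tokenized input file.
def _read_list_example():
    words = 'acell 3.1 3.1 5.2 ntypat 2'.split()
    return read_list('acell', 3, float, words)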
def words(fileobj):
"""Generator of words. However does not allow to use methods of list for returned"""
for line in fileobj:
for word in line.split():
yield word
def server_cp(copy_file, to, gz = True, scratch = False, new_filename = None):
if scratch:
if not header.PATH2ARCHIVE:
printlog('Warning! PATH2ARCHIVE is empty! Please put path archive in ~/simanrc.py or ./project_conf.py ')
copy_file = header.PATH2ARCHIVE + '/' + copy_file
else:
copy_file = header.project_path_cluster + '/' + copy_file
filename = os.path.basename(copy_file)
if new_filename is None:
new_filename = filename
if gz:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename + '.gz ; gunzip -f '+ to+ '/'+new_filename+'.gz'
else:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename
printlog('Running on server', command, imp = '')
if file_exists_on_server(copy_file, header.cluster_address):
out = run_on_server(command, addr = header.cluster_address)
printlog('Output of run_on_server', out, imp = '')
else:
out = 'error, file does not exist on server: '+copy_file
return out
def wrapper_cp_on_server(file, to, new_filename = None):
"""
    tries the scratch directory and gz variants iteratively
"""
copy_to = to
copy_file = file
filename = os.path.basename(file)
if new_filename:
app = 'with new name '+new_filename
else:
app = ''
for s, gz in product([0,1], ['', '.gz']):
printlog('scratch, gz:', s, gz)
out = server_cp(copy_file+gz, to = to, gz = gz, scratch = s, new_filename = new_filename)
if out == '':
printlog('File', filename, 'was succesfully copied to',to, app, imp = 'y')
break
# else:
else:
printlog('Warning! File was not copied, probably it does not exist. Try using header.warnings = "neyY" for more details', imp = 'y')
return
def update_incar(parameter = None, value = None, u_ramp_step = None, write = True, f = None, run = False, st = None):
"""Modifications of INCAR. Take attention that *parameter* will be changed to new *value*
if it only already exist in INCAR. *u_ramp_step*-current step to determine u,
*write*-sometimes just the return value is needed.
Returns U value corresponding to *u_ramp_step*.
"""
self = st
u_step = None
if parameter == 'LDAUU':
#Update only non-zero elements of LDAUU with value
set_LDAUU_list = self.set.vasp_params['LDAUU']
new_LDAUU_list = copy.deepcopy(set_LDAUU_list)
# print set_LDAUU_list
u_step = 0.0
for i, u in enumerate(set_LDAUU_list):
if u == 0:
continue
u_step = np.linspace(0, u, self.set.u_ramping_nstep)[u_ramp_step]
u_step = np.round(u_step, 1)
# new_LDAUU_list[i] = value
new_LDAUU_list[i] = u_step
new_LDAUU = 'LDAUU = '+' '.join(['{:}']*len(new_LDAUU_list)).format(*new_LDAUU_list)
command = "sed -i.bak '/LDAUU/c\\" + new_LDAUU + "' INCAR\n"
#print('u_step',u_step)
#sys.exit()
elif parameter == 'MAGMOM':
new_incar_string = parameter + ' = ' + ' '.join(['{:}']*len(value)).format(*value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
# elif parameter in ['IMAGES', 'ISPIN']:
else:
new_incar_string = parameter + ' = ' + str(value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
if write and f:
f.write(command)
if run:
runBash(command)
return u_step #for last element
def check_output(filename, check_string, load):
"""
    Check if the file exists and has finished, by searching for check_string
"""
if filename and os.path.exists(filename):
out = grep_file(check_string, filename, reverse = True)
printlog('The grep result of',filename, 'is:', out)
# sys.exit()
if check_string in out or 'un' in load:
state = '4. Finished'
else:
state = '5. Broken outcar'
else:
state = '5. no OUTCAR'
return state
| gpl-2.0 |
yuvrajsingh86/DeepLearning_Udacity | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
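
# A minimal usage sketch (the initializer choice is an assumption, not part of
# this helper): visualize the spread of a truncated-normal weight initializer.
def _hist_dist_example():
    hist_dist('tf.truncated_normal, stddev=0.1',
              tf.truncated_normal([1000], stddev=0.1))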
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
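
# A minimal usage sketch (the `mnist` dataset object and the 784/256/128/10
# layer sizes are assumptions matching the biases defined above, not part of
# this file): compare an all-zeros initializer against a truncated-normal one.
def _compare_init_weights_example(mnist):
    all_zeros = [tf.Variable(tf.zeros([784, 256])),
                 tf.Variable(tf.zeros([256, 128])),
                 tf.Variable(tf.zeros([128, 10]))]
    trunc_normal = [tf.Variable(tf.truncated_normal([784, 256], stddev=0.1)),
                    tf.Variable(tf.truncated_normal([256, 128], stddev=0.1)),
                    tf.Variable(tf.truncated_normal([128, 10], stddev=0.1))]
    compare_init_weights(mnist, 'All zeros vs truncated normal',
                         [(all_zeros, 'All zeros'),
                          (trunc_normal, 'Truncated normal')])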
| mit |
ravenshooter/BA_Analysis | Preprocess.py | 1 | 5604 |
import numpy
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy
import mdp
import csv
from thread import start_new_thread
import DataSet
from DataAnalysis import plot
from Main import getProjectPath
def readFileToNumpy(fileName):
reader=csv.reader(open(fileName,"rb"),delimiter=',')
x=list(reader)
return numpy.array(x[1:]).astype('float')
def separateInputData(fileData,removeErrors=True):
if removeErrors:
error_inds = fileData[:,-1]==False
fileData = fileData[error_inds]
fused = numpy.atleast_2d(fileData[:,1:4])
gyro = numpy.atleast_2d(fileData[:,4:7])
acc = numpy.atleast_2d(fileData[:,7:10])
targets = numpy.atleast_2d(fileData[:,10:])
return fused, gyro, acc, targets
def transformToDelta(vals):
newVals = numpy.zeros((len(vals),len(vals[0])))
for i in range(1,len(vals)):
newVals[i-1] = vals[i]-vals[i-1]
return newVals
def removeLOverflow(fused):
for j in range(0,3):
for i in range(1,len(fused)):
if numpy.abs(fused[i-1,j] - fused[i,j]) > numpy.pi:
fused[i:,j] = fused[i:,j] * -1
return fused
def applyActivationFilter(inputData, width):
actLevel = numpy.sum(numpy.abs(inputData),1)
target = numpy.zeros((len(inputData),1))
for i in range(width,len(inputData-width)):
target[i] = numpy.mean(actLevel[i-width:i+width])
return target
def centerAndNormalize(inputData):
means = numpy.mean(inputData, 0)
centered = inputData - means
vars = numpy.std(centered, 0)
normalized = centered/vars
return normalized, means, vars
def getTrainingBeginAndEndIndex(targetSig):
beginInd = 0
endInd = len(targetSig)
for i in range(0,len(targetSig)):
if targetSig[i] == 1:
beginInd= i-1;
break
for i in range(0,len(targetSig)):
if targetSig[len(targetSig)-1-i] == 1:
endInd= len(targetSig)-i;
break
return beginInd,endInd
def formatDataSet(data):
print data.shape
newStart = input("Start:")
newEnd = input("End:")
newData = data[newStart:newEnd,:]
return newData
def formatTargetFilter(data):
treshold = input('Treshold:')
targetFunction = applyFormatTargetFilter(data, treshold)
plt.figure()
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.plot(targetFunction)
return targetFunction
def applyFormatTargetFilter(data, treshold):
targetFunction = (data[:,10] > treshold).astype(float)
return numpy.atleast_2d(targetFunction).T
def removeArea(data):
cutOutStart = input("Start:")
cutOutEnd = input("End:")
newDataStart = data[:cutOutStart,:]
newDataEnd = data[cutOutEnd:,:]
return numpy.concatenate((newDataStart,newDataEnd))
def plotData(data):
plt.figure()
plt.clf()
plt.subplot(411)
plt.title('Fused')
plt.plot(data[:,0:3])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(412)
plt.title('Gyro')
plt.plot(data[:,3:6])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(413)
plt.title('Acc')
plt.plot(data[:,6:9])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(414)
plt.title('Targets')
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.show()
def writeToCSV(data,fileName):
numpy.savetxt(getProjectPath()+"\\dataSets\\"+fileName+".csv", data, delimiter=";")
def safeToDataSet(fileName, data, means, stds, gestures, targetTreshold):
ds = DataSet.DataSet(data[:,0:3],data[:,3:6],data[:,6:9],numpy.append(data[:,9:], applyFormatTargetFilter(data, targetTreshold), 1), \
means, stds, gestures)
ds.writeToFile(fileName)
def load(nr):
global i
plt.close('all')
i = readFile("nadja\\nadja_"+str(nr)+".csv")
plotData(i)
def safe(inputData,aaa,nr):
writeToCSV(numpy.concatenate((inputData,numpy.atleast_2d(aaa).T),1),"nadja_fitted_"+str(nr))
def readFile(fileName):
return readFileToNumpy(getProjectPath()+'dataSets\\'+fileName)
if __name__ == '__main__':
#def main():
inputFileName = ["2016-03-14-10-30-47-nike_fullSet_0.csv"]
fileData = numpy.zeros((1,31))
for fileName in inputFileName:
newData = readFileToNumpy(getProjectPath()+'dataSets\\'+fileName)
print newData.shape
fileData = numpy.append(fileData,newData,0)
fused, gyro, acc, targets = separateInputData(fileData)
#fused = removeLOverflow(fused)
#fused = transformToDelta(fused)
_, f_means, f_stds = centerAndNormalize(fused)
_, g_means, g_stds = centerAndNormalize(gyro)
_, a_means, a_stds = centerAndNormalize(acc)
means = numpy.concatenate((f_means,g_means,a_means),0)
stds = numpy.concatenate((f_stds,g_stds,a_stds),0)
gestures = numpy.max(targets,0)
dataSets = []
gestureSets = []
for i in range(0,len(targets[0])):
start, end = getTrainingBeginAndEndIndex(targets[:,i])
t_fused = fused[start:end,:]
t_gyro = gyro[start:end,:]
t_acc = acc[start:end,:]
t_target =numpy.atleast_2d(targets[start:end,i]).T
t_accFilter = applyActivationFilter(numpy.concatenate((t_fused,t_gyro,t_acc),1),6)
a = numpy.concatenate((t_fused,t_gyro,t_acc,t_target,t_accFilter),1)
dataSets.append(a)
gestureSets.append(numpy.max(targets[start:end,:],0))
| mit |
JosmanPS/scikit-learn | sklearn/linear_model/least_angle.py | 37 | 53448 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (neither will they when using method 'lar'
...). Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater
# than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each inactive variable and the
# equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
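# Hedged usage sketch (not part of the original module): a minimal call to
# lars_path on synthetic data, only illustrating the return values documented
# above; the dataset and shapes are assumptions.
def _lars_path_usage_sketch():  # illustrative helper, never called by the module
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=50, n_features=10, noise=1.0,
                           random_state=0)
    # method='lasso' follows the lasso path; alphas decrease along the path and
    # coefs holds one column of coefficients per knot of the path.
    alphas, active, coefs = lars_path(X, y, method='lasso')
    assert coefs.shape == (X.shape[1], len(alphas))
    return alphas, active, coefs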
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
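# Hedged usage sketch (not part of the original module): fitting LarsCV on a
# small synthetic regression problem; attribute names follow the docstring
# above, the data itself is an assumption.
def _lars_cv_usage_sketch():  # illustrative helper, never called by the module
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=60, n_features=8, noise=2.0,
                           random_state=0)
    model = LarsCV(max_iter=50).fit(X, y)
    # alpha_ is the regularization level that minimizes the averaged
    # left-out MSE; cv_alphas_ and cv_mse_path_ expose the full CV grid.
    return model.alpha_, model.cv_alphas_, model.cv_mse_path_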
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
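# Hedged usage sketch (not part of the original module): LassoLarsCV only
# overrides `method`, so its use mirrors LarsCV; shown here on data with few
# informative features, where a sparse lasso solution is expected.
def _lasso_lars_cv_usage_sketch():  # illustrative helper, never called
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=80, n_features=20, n_informative=5,
                           noise=1.0, random_state=0)
    model = LassoLarsCV().fit(X, y)
    # coef_ is the lasso solution refit at the cross-validated alpha_.
    return model.alpha_, model.coef_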
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (alphas_[alphas_ >
0.].min() when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
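# Hedged usage sketch (not part of the original module): comparing the AIC and
# BIC selections; criterion_ is the array computed in fit above, i.e.
# n_samples * log(MSE) + K * df with K = 2 for 'aic' and K = log(n_samples)
# for 'bic'. The synthetic data is an assumption.
def _lasso_lars_ic_usage_sketch():  # illustrative helper, never called
    import numpy as np
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=15, n_informative=4,
                           noise=1.0, random_state=0)
    for criterion in ('aic', 'bic'):
        model = LassoLarsIC(criterion=criterion).fit(X, y)
        # alpha_ is the knot of the lars path with the smallest criterion value.
        print(criterion, model.alpha_, np.argmin(model.criterion_))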
| bsd-3-clause |
junwoo091400/MyCODES | Projects/FootPad_Logger/logged_data_analyzer_LSTM/RNN_LSTM.py | 1 | 2131 |
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import ipdb
def RNN_LSTM(batch_size_in = 5, total_len_in = 30000, pad_len_in = 5, backprop_len_in = 50, state_size_in = 10, num_class_in = 32):
# total_len_in = (backprop_len_in) * (num_batches)
# Get inputs.
batch_size = batch_size_in
total_series_length = total_len_in
pad_length = pad_len_in
truncated_backprop_length = backprop_len_in
state_size = state_size_in
num_classes = num_class_in
num_batches = total_series_length // truncated_backprop_length
#Model generate
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length, pad_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
cell_state = tf.placeholder(tf.float32, [batch_size, state_size])
hidden_state = tf.placeholder(tf.float32, [batch_size, state_size])
init_state = tf.nn.rnn_cell.LSTMStateTuple(cell_state, hidden_state)
# LSTM -> classes.
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1) # Becomes [truncated_len, batch_size]
# Forward passes
cell = tf.contrib.rnn.BasicLSTMCell(state_size, state_is_tuple=True)
states_series, current_state = tf.contrib.rnn.static_rnn(cell, inputs_series, init_state)#Input 'init_state' + 'inputs_series' + 'cell'
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series,labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
return (batchX_placeholder, batchY_placeholder, cell_state, hidden_state, current_state, predictions_series, W2, b2, cell, train_step, total_loss) | gpl-3.0 |
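# Hedged usage sketch for the RNN_LSTM graph builder above (not part of the
# original file): assuming the TensorFlow 1.x Session API, one truncated-BPTT
# training step could be driven as below; batch shapes, the random data and
# the zero initial state are assumptions.
def _rnn_lstm_training_sketch():  # illustrative helper, never called
    import numpy as np
    import tensorflow as tf
    (batchX, batchY, cell_state, hidden_state, current_state, predictions,
     W2, b2, cell, train_step, total_loss) = RNN_LSTM(
        batch_size_in=5, total_len_in=30000, pad_len_in=5,
        backprop_len_in=50, state_size_in=10, num_class_in=32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        zero_cell = np.zeros((5, 10), dtype=np.float32)
        zero_hidden = np.zeros((5, 10), dtype=np.float32)
        x = np.random.rand(5, 50, 5).astype(np.float32)
        y = np.random.randint(0, 32, size=(5, 50))
        loss_val, state_val, _ = sess.run(
            [total_loss, current_state, train_step],
            feed_dict={batchX: x, batchY: y,
                       cell_state: zero_cell, hidden_state: zero_hidden})
        # state_val is an LSTMStateTuple; feed its .c and .h back in as the
        # initial state of the next truncated window.
        return loss_val, state_val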
AIML/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
rohanp/scikit-learn | sklearn/model_selection/tests/test_validation.py | 20 | 27961 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from test_split import MockClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y)
def test_cross_val_score_predict_labels():
# Check if ValueError (when labels is None) propagates to cross_val_score
# and cross_val_predict
# And also check if labels is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((mask_train, mask_test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cross_val_score(reg, X, y, cv=5, scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_label, _, pvalue_label = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, labels=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_check_is_permutation():
p = np.arange(100)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
nmartensen/pandas | pandas/io/formats/common.py | 16 | 1094 | # -*- coding: utf-8 -*-
"""
Common helper methods used in different submodules of pandas.io.formats
"""
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values on for level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
----------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True for x in levels[0]]
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
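# Illustrative sketch (not part of the original module): a worked example for
# a hypothetical two-level header where '' is the sentinel. Each returned dict
# maps the position at which a label starts to the number of positions that
# label spans.
#
#   >>> get_level_lengths([['a', '', '', 'b', ''],
#   ...                    ['x', '', 'y', 'z', '']])
#   [{0: 3, 3: 2}, {0: 2, 2: 1, 3: 2}]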
| bsd-3-clause |
phdowling/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
        is sparse, None otherwise. The ``indicator_matrix_`` attribute is
        deprecated as of version 0.16 and will be removed in 0.18.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
        linear model's decision_function method directly as the input
        to inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
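# Illustrative sketch (not part of the original module): for a dense score
# matrix the inverse transform is simply the per-row argmax mapped through
# ``classes``. The values below are hypothetical.
#
#   >>> _inverse_binarize_multiclass(np.array([[0.1, 0.8, 0.1],
#   ...                                        [0.9, 0.0, 0.1]]),
#   ...                              classes=[3, 5, 7])
#   array([5, 3])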
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
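    # Illustrative sketch (not part of the original module): with a
    # hypothetical mapping {'a': 0, 'b': 1, 'c': 2}, the label sets
    # [('a', 'b'), ('c',)] become a 2 x 3 CSR indicator matrix.
    #
    #   >>> mlb = MultiLabelBinarizer()
    #   >>> mlb._transform([('a', 'b'), ('c',)],
    #   ...                {'a': 0, 'b': 1, 'c': 2}).toarray()
    #   array([[1, 1, 0],
    #          [0, 0, 1]])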
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
xwolf12/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
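# Illustrative note (not part of the original module): each resampling yields a
# boolean "active set" of selected features, and the returned scores are the
# fraction of resamplings in which each feature was active. For instance, with
# three hypothetical active sets [1, 0, 1], [1, 1, 0] and [1, 0, 0], the
# resulting scores would be [1.0, 1/3, 1/3].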
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection.
    # Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
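# Illustrative usage sketch (not part of the original module), assuming a
# synthetic regression problem from sklearn.datasets; after fitting, scores_
# holds a per-feature selection frequency and get_support() the selected mask.
#
#   >>> from sklearn.datasets import make_regression
#   >>> X, y = make_regression(n_samples=50, n_features=10, n_informative=3,
#   ...                        random_state=0)
#   >>> rl = RandomizedLasso(n_resampling=50, random_state=0).fit(X, y)
#   >>> rl.scores_.shape
#   (10,)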
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
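# Illustrative usage sketch (not part of the original module), assuming a small
# synthetic classification problem; as in the RandomizedLasso sketch above,
# scores_ gives per-feature selection frequencies.
#
#   >>> from sklearn.datasets import make_classification
#   >>> X, y = make_classification(n_samples=60, n_features=8, n_informative=3,
#   ...                            random_state=0)
#   >>> rlr = RandomizedLogisticRegression(n_resampling=20, random_state=0)
#   >>> rlr.fit(X, y).scores_.shape
#   (8,)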
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
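# Illustrative usage sketch (not part of the original module), assuming the
# same kind of synthetic regression data as above; the returned grid spans
# (0, 1] and scores_path has one row per feature.
#
#   >>> from sklearn.datasets import make_regression
#   >>> X, y = make_regression(n_samples=50, n_features=10, n_informative=3,
#   ...                        random_state=0)
#   >>> alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0,
#   ...                                                 n_resampling=20)
#   >>> scores_path.shape[0]
#   10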
| bsd-3-clause |
joernhees/scikit-learn | sklearn/metrics/cluster/supervised.py | 25 | 31477 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy.misc import comb
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
from ...utils.validation import check_array
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
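# Illustrative note (not part of the original module): comb2(n) is the number
# of unordered pairs among n items, e.g. comb2(4) == 6.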
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
        If True, return a sparse CSR contingency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
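# Illustrative sketch (editor's addition, not part of the original module):
# what `contingency_matrix` returns for a tiny labeling.  The hard-coded
# labels below are arbitrary and only serve as an example.
def _contingency_matrix_sketch():
    labels_true = [0, 0, 1, 1, 1]
    labels_pred = [0, 0, 0, 1, 1]
    # C[i, j] counts samples in true class i and predicted cluster j:
    #            pred 0  pred 1
    #   true 0      2       0
    #   true 1      1       2
    return contingency_matrix(labels_true, labels_pred)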
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
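# Illustrative sketch (editor's addition, not part of the original module):
# the same pair-counting quantities used in `adjusted_rand_score`, computed by
# brute force over all sample pairs.  O(n ** 2) and assumes a non-degenerate
# labeling (the special cases handled above are not covered); exposition only.
def _adjusted_rand_brute_force(labels_true, labels_pred):
    from itertools import combinations
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)
    n_samples = len(labels_true)
    pairs = list(combinations(range(n_samples), 2))
    # pairs grouped together in both labelings / in each labeling separately
    sum_comb = sum(labels_true[i] == labels_true[j] and
                   labels_pred[i] == labels_pred[j] for i, j in pairs)
    sum_comb_c = sum(labels_true[i] == labels_true[j] for i, j in pairs)
    sum_comb_k = sum(labels_pred[i] == labels_pred[j] for i, j in pairs)
    prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return (sum_comb - prod_comb) / (mean_comb - prod_comb)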
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of the ground
    truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
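# Illustrative sketch (editor's addition, not part of the original module):
# the equivalent conditional-entropy formulation of homogeneity from
# Rosenberg & Hirschberg (2007).  Since MI(C, K) = H(C) - H(C|K), the ratio
# MI / H(C) computed above equals 1 - H(C|K) / H(C).
def _homogeneity_via_conditional_entropy(labels_true, labels_pred):
    h_c = entropy(labels_true)
    mi = mutual_info_score(labels_true, labels_pred)
    h_c_given_k = h_c - mi              # conditional entropy H(C|K)
    return 1.0 - h_c_given_k / h_c if h_c else 1.0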
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
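# Illustrative sketch (editor's addition, not part of the original module):
# direct evaluation of MI(U, V) = sum_ij P(i, j) * log(P(i, j) / (P(i) P'(j)))
# from the dense contingency matrix; cross-checks `mutual_info_score`.
def _mutual_info_brute_force(labels_true, labels_pred):
    c = contingency_matrix(labels_true, labels_pred).astype(np.float64)
    p_ij = c / c.sum()                        # joint distribution P(i, j)
    p_i = p_ij.sum(axis=1, keepdims=True)     # marginal P(i)
    p_j = p_ij.sum(axis=0, keepdims=True)     # marginal P'(j)
    nonzero = p_ij > 0
    return float(np.sum(p_ij[nonzero] *
                        np.log(p_ij[nonzero] / (p_i * p_j)[nonzero])))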
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper bounded by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
    the pairwise precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negatives** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
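# Illustrative sketch (editor's addition, not part of the original module):
# the pairwise precision/recall formulation of the FMI computed by brute
# force over all sample pairs.  O(n ** 2); for exposition only.
def _fowlkes_mallows_brute_force(labels_true, labels_pred):
    from itertools import combinations
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)
    tp = fp = fn = 0
    for i, j in combinations(range(len(labels_true)), 2):
        same_true = labels_true[i] == labels_true[j]
        same_pred = labels_pred[i] == labels_pred[j]
        tp += same_true and same_pred        # pair together in both labelings
        fp += same_true and not same_pred    # together in labels_true only
        fn += same_pred and not same_true    # together in labels_pred only
    return tp / np.sqrt((tp + fp) * (tp + fn)) if tp else 0.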
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
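# Illustrative sketch (editor's addition, not part of the original module):
# direct evaluation of H = -sum_k p_k * log(p_k) with a plain Counter;
# cross-checks `entropy` for non-empty labelings.
def _entropy_brute_force(labels):
    from collections import Counter
    counts = np.array(list(Counter(labels).values()), dtype=np.float64)
    p = counts / counts.sum()
    return float(-np.sum(p * np.log(p)))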
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
eistre91/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
    trim = max(1, n // 10)  # integer trim count (floor division)
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
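# Editor's sketch: GeneratePredictions below builds a `years2` column, which
# is only meaningful for a quadratic model like the one sketched here; it is
# not exercised by main().
def RunQuadraticModel(daily):
    """Runs a quadratic model of prices versus years.

    daily: DataFrame of daily prices

    returns: model, results
    """
    daily['years2'] = daily.years**2
    model = smf.ols('ppg ~ years + years2', data=daily)
    results = model.fit()
    return model, results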
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
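# Editor's sketch (assumption: a newer pandas release without the top-level
# `pandas.ewma`/`pandas.rolling_mean` functions): the same fill logic written
# with the `.ewm()` accessor that replaced them.
def FillMissingModernPandas(daily, span=30):
    """Same as FillMissing, using the newer pandas ewm API."""
    dates = pandas.date_range(daily.index.min(), daily.index.max())
    reindexed = daily.reindex(dates)
    ewma = reindexed.ppg.ewm(span=span).mean()
    resid = (reindexed.ppg - ewma).dropna()
    fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
    reindexed.ppg.fillna(fake_data, inplace=True)
    reindexed['ewma'] = ewma
    reindexed['resid'] = reindexed.ppg - ewma
    return reindexed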
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
hrjn/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
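# Editor's sketch: any callable that takes a fitted estimator can serve as a
# 'complexity_computer' in the configurations below; e.g. counting support
# vectors, which the NuSVR entry expresses inline as a lambda.
def _count_support_vectors(estimator):
    return len(estimator.support_vectors_)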
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
funbaker/astropy | astropy/table/tests/test_table.py | 2 | 69207 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ...io import fits
from ...tests.helper import (assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from ...utils.data import get_pkg_data_filename
from ... import table
from ... import units as u
from .conftest import MaskedTable
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.columns.keys() == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.columns.keys() == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.columns.keys() == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.columns.keys() == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.columns.keys() == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.columns.keys() == ['c']
# Check that we did not change the name of the input column
assert t.columns.keys() == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.columns.keys() == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(['one', 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests which calls insert_row(index=len(self), ...), so
here just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.columns.keys() == []
assert self.t.as_array() is None
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.columns.keys() == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
assert self.t.dtype == np.dtype([(str('a'), 'int'),
(str('b'), 'int')])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.columns.keys() == []
assert self.t.as_array() is None
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.columns.keys() == []
assert t.as_array() is None
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
def test_empty(self, table_types):
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1')
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array(
[str(x) for x in ["John", "Jo", "Max"]])])
assert np.all([t['name'] == np.array(
[str(x) for x in ["Jackson", "Miller", "Miller"]])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
d = np.array([(2, 1),
(3, 6),
(4, 5)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[(str('c'), 'i8'), (str('d'), 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
native_order = byte_orders[sys.byteorder == 'little']
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder ==
arr2[colname].dtype.byteorder)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
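# Note (illustrative): np.may_share_memory reports whether two arrays can share
# a data buffer, so a deep copy must give False and a shallow copy/view True,
# which is exactly what the two branches above assert.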
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
t['x'].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_equality():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from ...utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance('', bytes):
return
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
def test_unicode_bytestring_conversion(table_types):
t = table_types.Table([['abc'], ['def'], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
assert t1['col0'][0] == 'abc'
assert t1['col1'][0] == 'def'
assert t1['col2'][0] == 1
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
assert t1['col0'][0] == str('abc')
assert t1['col1'][0] == str('def')
assert t1['col2'][0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert exc.value.args[0] == "Cannot convert a table with multi-dimensional columns to a pandas DataFrame"
def test_mixin(self):
from ...coordinates import SkyCoord
t = table.Table()
t['c'] = SkyCoord([1, 2, 3], [4, 5, 6], unit='deg')
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert exc.value.args[0] == "Cannot convert a table with mixin columns to a pandas DataFrame"
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
assert t2[name].dtype.kind == 'f'
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError):
t.replace_column('not there', [1, 2, 3])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.MaskedColumn)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
assert st.columns[0][0] == (1 if cpy else 10)
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
t = table.Table(st, dtype=dtypes, names=names, meta=OrderedDict([('c', 3)]))
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
# The supplied meta is ignored. This is consistent with current
# behavior when initializing from an existing astropy Table.
assert t.meta == st.meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err)
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
assert len(w) == 0
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
| bsd-3-clause |
boada/planckClusters | MOSAICpipe/bpz-1.99.3/bpz.py | 1 | 52171 | """
bpz: Bayesian Photo-Z estimation
Reference: Benitez 2000, ApJ, 536, p.571
Usage:
python bpz.py catalog.cat
Needs a catalog.columns file which describes the contents of catalog.cat
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import map
from builtins import input
from builtins import range
from past.utils import old_div
from useful import *
rolex = watch()
rolex.set()
#from Numeric import *
from numpy import *
from bpz_tools import *
from string import *
import os, glob, sys
import time
import pickle
import shelve
from coetools import pause, params_cl
class Printer():
"""Print things to stdout on one line dynamically"""
def __init__(self, data):
sys.stdout.write("\r\x1b[K" + data.__str__())
sys.stdout.flush()
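# Minimal usage sketch for Printer (hypothetical names): repeated calls rewrite
# the same terminal line instead of scrolling, e.g.
#   for i in range(n_objects):
#       Printer('%i / %i galaxies processed' % (i + 1, n_objects))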
def seglist(vals, mask=None):
"""Split vals into lists based on mask > 0"""
if mask is None:
mask = greater(vals, 0)
lists = []
i = 0
lastgood = False
list1 = []
for i in range(len(vals)):
if mask[i] == False:
if lastgood:
lists.append(list1)
list1 = []
lastgood = False
if mask[i]:
list1.append(vals[i])
lastgood = True
if lastgood:
lists.append(list1)
return lists
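# Worked example (sketch): seglist([1, 2, 0, 3, 4]) builds mask = vals > 0 and
# returns [[1, 2], [3, 4]]; zeros (or masked-out entries) split the groups.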
# Initialization and definitions
#Current directory
homedir = os.getcwd()
#Parameter definition
pars = params()
pars.d = {
'SPECTRA': 'CWWSB4.list', # template list
#'PRIOR': 'hdfn_SB', # prior name
'PRIOR': 'hdfn_gen', # prior name
'NTYPES':
None, # Number of Elliptical, Spiral, and Starburst/Irregular templates. Default: 1,2,n-3
'DZ': 0.01, # redshift resolution
'ZMIN': 0.01, # minimum redshift
'ZMAX': 10., # maximum redshift
'MAG': 'yes', # Data in magnitudes?
'MIN_MAGERR': 0.001, # minimum magnitude uncertainty --DC
'ODDS': 0.95, # Odds threshold: affects confidence limits definition
'INTERP':
0, # Number of interpolated templates between each of the original ones
'EXCLUDE': 'none', # Filters to be excluded from the estimation
'NEW_AB': 'no', # If yes, generate new AB files even if they already exist
'CHECK':
'yes', # Perform some checks, compare observed colors with templates, etc.
'VERBOSE': 'yes', # Print estimated redshifts to the standard output
'PROBS':
'no', # Save all the galaxy probability distributions (it will create a very large file)
'PROBS2':
'no', # Save all the galaxy probability distributions P(z,t) (but not priors) -- Compact
'PROBS_LITE': 'yes', # Save only the final probability distribution
'GET_Z': 'yes', # Actually obtain photo-z
'ONLY_TYPE': 'no', # Use spectroscopic redshifts instead of photo-z
'MADAU': 'yes', #Apply Madau correction to spectra
'Z_THR': 0, #Integrate probability for z>z_thr
'COLOR': 'no', #Use colors instead of fluxes
'PLOTS': 'no', #Don't produce plots
'INTERACTIVE': 'yes', #Don't query the user
'PHOTO_ERRORS':
'no', #Define the confidence interval using only the photometric errors
'MIN_RMS':
0.05, #"Intrinsic" photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
'N_PEAKS': 1,
'MERGE_PEAKS': 'no',
'CONVOLVE_P': 'yes',
'P_MIN': 1e-2,
'SED_DIR': sed_dir,
'AB_DIR': ab_dir,
'FILTER_DIR': fil_dir,
'DELTA_M_0': 0.,
'ZP_OFFSETS': 0.,
'ZC': None,
'FC': None,
"ADD_SPEC_PROB": None,
"ADD_CONTINUOUS_PROB": None,
"NMAX": None # Useful for testing
}
if pars.d['PLOTS'] == 'no': plots = 0
else: plots = 1
if plots:
# If pylab installed show plots
plots = 'pylab'
try:
import matplotlib
matplotlib.use('TkAgg')
from pylab import *
# from coeplot2a import *
plot([1])
title('KILL THIS WINDOW!')
show()
ioff()
except:
try:
from biggles import *
plots = 'biggles'
except:
plots = 0
#Define the default values of the parameters
pars.d['INPUT'] = sys.argv[1] # catalog with the photometry
obs_file = pars.d['INPUT']
root = os.path.splitext(pars.d['INPUT'])[0]
pars.d[
'COLUMNS'] = root + '.columns' # column information for the input catalog
pars.d['OUTPUT'] = root + '.bpz' # output
nargs = len(sys.argv)
ipar = 2
if nargs > 2: #Check for parameter file and update parameters
if sys.argv[2] == '-P':
pars.fromfile(sys.argv[3])
ipar = 4
# Update the parameters using command line additions
#pars.fromcommandline(sys.argv[ipar:])
#for key in pars.d:
# print key, pars.d[key]
#pause()
pars.d.update(
params_cl()) # allows for flag only (no value after), e.g., -CHECK
def updateblank(var, ext):
global pars
if pars.d[var] in [None, 'yes']:
pars.d[var] = root + '.' + ext
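# Example (sketch): with INPUT='mycat.cat' the root is 'mycat', so leaving CHECK
# at its default 'yes' makes the call below turn it into 'mycat.flux_comparison'.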
updateblank('CHECK', 'flux_comparison')
updateblank('PROBS_LITE', 'probs')
updateblank('PROBS', 'full_probs')
updateblank('PROBS2', 'chisq')
#if pars.d['CHECK'] in [None, 'yes']:
# pars.d['CHECK'] = root+'.flux_comparison'
#This allows changing the auxiliary directories used by BPZ
if pars.d['SED_DIR'] != sed_dir:
print("Changing sed_dir to ", pars.d['SED_DIR'])
sed_dir = pars.d['SED_DIR']
if sed_dir[-1] != '/': sed_dir += '/'
if pars.d['AB_DIR'] != ab_dir:
print("Changing ab_dir to ", pars.d['AB_DIR'])
ab_dir = pars.d['AB_DIR']
if ab_dir[-1] != '/': ab_dir += '/'
if pars.d['FILTER_DIR'] != fil_dir:
print("Changing fil_dir to ", pars.d['FILTER_DIR'])
fil_dir = pars.d['FILTER_DIR']
if fil_dir[-1] != '/': fil_dir += '/'
#Better safe than sorry
if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
print("This would delete the input file!")
sys.exit()
if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
print("This would delete the .columns file!")
sys.exit()
#Assign the intrinsic rms
if pars.d['SPECTRA'] == 'CWWSB.list':
print('Setting the intrinsic rms to 0.067(1+z)')
pars.d['MIN_RMS'] = 0.067
pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
if pars.d['INTERACTIVE'] == 'no': interactive = 0
else: interactive = 1
if pars.d['VERBOSE'] == 'yes':
print("Current parameters")
view_keys(pars.d)
pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
if pars.d["ADD_SPEC_PROB"] != None:
specprob = 1
specfile = pars.d["ADD_SPEC_PROB"]
spec = get_2Darray(specfile)
ns = spec.shape[1]
if old_div(ns, 2) != (old_div(ns, 2.)):
print("Number of columns in SPEC_PROB is odd")
sys.exit()
z_spec = spec[:, :old_div(ns, 2)]
p_spec = spec[:, old_div(ns, 2):]
# Write output file header
header = "#ID "
header += ns / 2 * " z_spec%i"
header += ns / 2 * " p_spec%i"
header += "\n"
header = header % tuple(list(range(old_div(ns, 2))) + list(range(old_div(
ns, 2))))
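# For instance, with ns=4 (two z_spec/p_spec column pairs) the header built
# above reads "#ID  z_spec0 z_spec1 p_spec0 p_spec1".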
specout = open(specfile.split()[0] + ".p_spec", "w")
specout.write(header)
else:
specprob = 0
pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])
#Some misc. initialization info useful for the .columns file
#nofilters=['M_0','OTHER','ID','Z_S','X','Y']
nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']
#Numerical codes for nondetection, etc. in the photometric catalog
unobs = -99. #Objects not observed
undet = 99. #Objects not detected
#Define the z-grid
zmin = float(pars.d['ZMIN'])
zmax = float(pars.d['ZMAX'])
if zmin > zmax: raise ValueError('ZMIN must be smaller than ZMAX')
dz = float(pars.d['DZ'])
linear = 1
if linear:
z = arange(zmin, zmax + dz, dz)
else:
if zmax != 0.:
zi = zmin
z = []
while zi <= zmax:
z.append(zi)
zi = zi + dz * (1. + zi)
z = array(z)
else:
z = array([0.])
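# Example (sketch): the default ZMIN=0.01, ZMAX=10., DZ=0.01 takes the linear
# branch and yields roughly 1000 grid points, z = 0.01, 0.02, ..., 10.00.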
#Now check the contents of the FILTERS, SED and AB directories
#Get the filters in stock
filters_db = []
filters_db = glob.glob(fil_dir + '*.res')
for i in range(len(filters_db)):
filters_db[i] = os.path.basename(filters_db[i])
filters_db[i] = filters_db[i][:-4]
#Get the SEDs in stock
sed_db = []
sed_db = glob.glob(sed_dir + '*.sed')
for i in range(len(sed_db)):
sed_db[i] = os.path.basename(sed_db[i])
sed_db[i] = sed_db[i][:-4]
#Get the ABflux files in stock
ab_db = []
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
ab_db[i] = os.path.basename(ab_db[i])
ab_db[i] = ab_db[i][:-3]
#Get a list with the filter names and check whether they are in stock
col_file = pars.d['COLUMNS']
filters = get_str(col_file, 0)
for cosa in nofilters:
if filters.count(cosa): filters.remove(cosa)
if pars.d['EXCLUDE'] != 'none':
if type(pars.d['EXCLUDE']) == type(' '):
pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
for cosa in pars.d['EXCLUDE']:
if filters.count(cosa): filters.remove(cosa)
for filter in filters:
if filter[-4:] == '.res': filter = filter[:-4]
if filter not in filters_db:
print('filter ', filter, 'not in database at', fil_dir, ':')
if ask('Print filters in database?'):
for line in filters_db:
print(line)
sys.exit()
#Get a list with the spectrum names and check whether they're in stock
#Look for the list in the home directory first,
#if it's not there, look in the SED directory
spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
if not os.path.exists(spectra_file):
spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])
spectra = get_str(spectra_file, 0)
for i in range(len(spectra)):
if spectra[i][-4:] == '.sed': spectra[i] = spectra[i][:-4]
nf = len(filters)
nt = len(spectra)
nz = len(z)
#Get the model fluxes
f_mod = zeros((nz, nt, nf)) * 0.
abfiles = []
for it in range(nt):
for jf in range(nf):
if filters[jf][-4:] == '.res': filtro = filters[jf][:-4]
else: filtro = filters[jf]
#model = join([spectra[it], filtro, 'AB'], '.')
model = '.'.join([spectra[it], filtro, 'AB'])
model_path = os.path.join(ab_dir, model)
abfiles.append(model)
#Generate new ABflux files if not present
# or if new_ab flag on
if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
if spectra[it] not in sed_db:
print('SED ', spectra[it], 'not in database at', sed_dir)
# for line in sed_db:
# print line
sys.exit()
#print spectra[it],filters[jf]
print(' Generating ', model, '....')
ABflux(spectra[it], filtro, madau=pars.d['MADAU'])
#z_ab=arange(0.,zmax_ab,dz_ab) #zmax_ab and dz_ab are def. in bpz_tools
# abflux=f_z_sed(spectra[it],filters[jf], z_ab,units='nu',madau=pars.d['MADAU'])
# abflux=clip(abflux,0.,1e400)
# buffer=join(['#',spectra[it],filters[jf], 'AB','\n'])
#for i in range(len(z_ab)):
# buffer=buffer+join([`z_ab[i]`,`abflux[i]`,'\n'])
#open(model_path,'w').write(buffer)
#zo=z_ab
#f_mod_0=abflux
#else:
#Read the data
zo, f_mod_0 = get_data(model_path, (0, 1))
#Rebin the data to the required redshift resolution
f_mod[:, it, jf] = match_resol(zo, f_mod_0, z)
#if sometrue(less(f_mod[:,it,jf],0.)):
if less(f_mod[:, it, jf], 0.).any():
print('Warning: some values of the model AB fluxes are <0')
print('due to the interpolation ')
print('Clipping them to f>=0 values')
#To avoid rounding errors in the calculation of the likelihood
f_mod[:, it, jf] = clip(f_mod[:, it, jf], 0., 1e300)
#We forbid f_mod to take values in the (0,1e-100) interval
#f_mod[:,it,jf]=where(less(f_mod[:,it,jf],1e-100)*greater(f_mod[:,it,jf],0.),0.,f_mod[:,it,jf])
#Here goes the interpolation between the colors
ninterp = int(pars.d['INTERP'])
ntypes = pars.d['NTYPES']
if ntypes is None:
nt0 = nt
else:
nt0 = list(ntypes)
for i, nt1 in enumerate(nt0):
print(i, nt1)
nt0[i] = int(nt1)
if (len(nt0) != 3) or (sum(nt0) != nt):
print()
    print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
print('does not add up to %d templates' % nt)
print('USAGE: -NTYPES nell,nsp,nsb')
print('nell = # of elliptical templates')
print('nsp = # of spiral templates')
print('nsb = # of starburst templates')
print(
'These must add up to the number of templates in the SPECTRA list')
print('Quitting BPZ.')
sys.exit()
if ninterp:
nti = nt + (nt - 1) * ninterp
buffer = zeros((nz, nti, nf)) * 1.
tipos = arange(0., float(nti), float(ninterp) + 1.)
xtipos = arange(float(nti))
for iz in arange(nz):
for jf in range(nf):
buffer[iz, :, jf] = match_resol(tipos, f_mod[iz, :, jf], xtipos)
nt = nti
f_mod = buffer
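    # Note: after this interpolation the working number of templates nt has grown
    # from the original N to N + (N-1)*INTERP, i.e. INTERP synthetic SEDs are
    # inserted between each pair of adjacent templates in the SPECTRA list.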
#for j in range(nf):
# plot=FramedPlot()
# for i in range(nt): plot.add(Curve(z,log(f_mod[:,i,j]+1e-40)))
# plot.show()
# ask('More?')
#Load all the parameters in the columns file to a dictionary
col_pars = params()
col_pars.fromfile(col_file)
# Read which filters are in which columns
flux_cols = []
eflux_cols = []
cals = []
zp_errors = []
zp_offsets = []
for filter in filters:
datos = col_pars.d[filter]
flux_cols.append(int(datos[0]) - 1)
eflux_cols.append(int(datos[1]) - 1)
cals.append(datos[2])
zp_errors.append(datos[3])
zp_offsets.append(datos[4])
zp_offsets = array(list(map(float, zp_offsets)))
if pars.d['ZP_OFFSETS']:
zp_offsets += array(list(map(float, pars.d['ZP_OFFSETS'])))
flux_cols = tuple(flux_cols)
eflux_cols = tuple(eflux_cols)
#READ the flux and errors from obs_file
f_obs = get_2Darray(obs_file, flux_cols)
ef_obs = get_2Darray(obs_file, eflux_cols)
#Convert them to arbitrary fluxes if they are in magnitudes
if pars.d['MAG'] == 'yes':
seen = greater(f_obs, 0.) * less(f_obs, undet)
no_seen = equal(f_obs, undet)
no_observed = equal(f_obs, unobs)
todo = seen + no_seen + no_observed
#The minimum photometric error is 0.01
#ef_obs=ef_obs+seen*equal(ef_obs,0.)*0.001
ef_obs = where(
greater_equal(ef_obs, 0.), clip(ef_obs, pars.d['MIN_MAGERR'], 1e10),
ef_obs)
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected magnitudes!')
print("""Allowed values for magnitudes are
0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" + repr(
unobs) + "(not observed)")
for i in range(len(todo)):
if not alltrue(todo[i, :]):
print(i + 1, f_obs[i, :], ef_obs[i, :])
sys.exit()
#Detected objects
try:
f_obs = where(seen, 10.**(-.4 * f_obs), f_obs)
except OverflowError:
print(
'Some of the input magnitudes have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(f_obs))
print('Maximum value', max(f_obs))
        print('Indexes for minimum values', argmin(f_obs, 0))
        print('Indexes for maximum values', argmax(f_obs, 0))
print('Bye.')
sys.exit()
try:
ef_obs = where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
except OverflowError:
print(
'Some of the input magnitude errors have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(ef_obs))
print('Maximum value', max(ef_obs))
        print('Indexes for minimum values', argmin(ef_obs, 0))
        print('Indexes for maximum values', argmax(ef_obs, 0))
print('Bye.')
sys.exit()
#print 'ef', ef_obs[0,:nf]
#print 'f', f_obs[1,:nf]
#print 'ef', ef_obs[1,:nf]
#Looked at, but not detected objects (mag=99.)
#We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
#If m=99, the corresponding error magnitude column is supposed to be dm=m_1sigma;
#to avoid errors with the sign we take the absolute value of dm
f_obs = where(no_seen, 0., f_obs)
ef_obs = where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)
#Objects not looked at (mag=-99.)
f_obs = where(no_observed, 0., f_obs)
ef_obs = where(no_observed, 0., ef_obs)
#Flux codes:
# If f>0 and ef>0 : normal objects
# If f==0 and ef>0 :object not detected
# If f==0 and ef==0: object not observed
#Everything else will crash the program
#Check that the observed error fluxes are reasonable
#if sometrue(less(ef_obs,0.)): raise 'Negative input flux errors'
if less(ef_obs, 0.).any():
raise ValueError('Negative input flux errors')
f_obs = where(less(f_obs, 0.), 0., f_obs) #Put non-detections to 0
ef_obs = where(
less(f_obs, 0.), maximum(1e-100, f_obs + ef_obs),
ef_obs) # Error equivalent to 1 sigma upper limit
#if sometrue(less(f_obs,0.)) : raise 'Negative input fluxes'
seen = greater(f_obs, 0.) * greater(ef_obs, 0.)
no_seen = equal(f_obs, 0.) * greater(ef_obs, 0.)
no_observed = equal(f_obs, 0.) * equal(ef_obs, 0.)
todo = seen + no_seen + no_observed
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected fluxes/errors')
#Convert (internally) objects with zero flux and zero error(non observed)
#to objects with almost infinite (~1e108) error and still zero flux
#This will yield reasonable likelihoods (flat ones) for these objects
ef_obs = where(no_observed, 1e108, ef_obs)
#Include the zero point errors
zp_errors = array(list(map(float, zp_errors)))
zp_frac = e_mag2frac(zp_errors)
#zp_frac=10.**(.4*zp_errors)-1.
ef_obs = where(seen, sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2), ef_obs)
ef_obs = where(no_seen,
sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
ef_obs)
#Add the zero-points offset
#The offsets are defined as m_new-m_old
zp_offsets = array(list(map(float, zp_offsets)))
zp_offsets = where(not_equal(zp_offsets, 0.), 10.**(-.4 * zp_offsets), 1.)
f_obs = f_obs * zp_offsets
ef_obs = ef_obs * zp_offsets
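#(a magnitude offset dm = m_new - m_old translates into a multiplicative flux
# factor 10**(-.4*dm), which is why the offsets are applied to both f_obs and ef_obs)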
#Convert fluxes to AB if needed
for i in range(f_obs.shape[1]):
if cals[i] == 'Vega':
const = mag2flux(VegatoAB(0., filters[i]))
f_obs[:, i] = f_obs[:, i] * const
ef_obs[:, i] = ef_obs[:, i] * const
elif cals[i] == 'AB':
continue
else:
print('AB or Vega?. Check ' + col_file + ' file')
sys.exit()
#Get m_0 (if present)
if 'M_0' in col_pars.d:
m_0_col = int(col_pars.d['M_0']) - 1
m_0 = get_data(obs_file, m_0_col)
m_0 += pars.d['DELTA_M_0']
#Get the objects ID (as a string)
if 'ID' in col_pars.d:
# print col_pars.d['ID']
id_col = int(col_pars.d['ID']) - 1
id = get_str(obs_file, id_col)
else:
id = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))
#Get spectroscopic redshifts (if present)
if 'Z_S' in col_pars.d:
z_s_col = int(col_pars.d['Z_S']) - 1
z_s = get_data(obs_file, z_s_col)
#Get the X,Y coordinates
if 'X' in col_pars.d:
datos = col_pars.d['X']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
x_col = int(col_pars.d['X']) - 1
x = get_data(obs_file, x_col)
if 'Y' in col_pars.d:
datos = col_pars.d['Y']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
y_col = int(datos) - 1
y = get_data(obs_file, y_col)
#If 'check' on, initialize some variables
check = pars.d['CHECK']
# This generates a file with m,z,T and observed/expected colors
#if check=='yes': pars.d['FLUX_COMPARISON']=root+'.flux_comparison'
checkSED = check != 'no'
ng = f_obs.shape[0]
if checkSED:
# PHOTOMETRIC CALIBRATION CHECK
#r=zeros((ng,nf),float)+1.
#dm=zeros((ng,nf),float)+1.
#w=r*0.
# Defaults: r=1, dm=1, w=0
frat = ones((ng, nf), float)
dmag = ones((ng, nf), float)
fw = zeros((ng, nf), float)
#Visualize the colors of the galaxies and the templates
#When there are spectroscopic redshifts available
if interactive and 'Z_S' in col_pars.d and plots and checkSED and ask(
'Plot colors vs spectroscopic redshifts?'):
color_m = zeros((nz, nt, nf - 1)) * 1.
if plots == 'pylab':
figure(1)
nrows = 2
ncols = old_div((nf - 1), nrows)
if (nf - 1) % nrows: ncols += 1
for i in range(nf - 1):
##plot=FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (z_s, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
subplot(nrows, ncols, i + 1)
plot(zz, colour, "bo")
elif plots == 'biggles':
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
plot(zz, colour, "r")
elif plots == 'biggles':
d = Curve(zz, colour, color='red')
plot.add(d)
if plots == 'pylab':
xlabel(r'$z$')
ylabel('%s - %s' % (filters[i], filters[i + 1]))
elif plots == 'biggles':
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
if plots == 'pylab':
show()
        inp = input('Hit Enter to continue.')
#Get other information which will go in the output file (as strings)
if 'OTHER' in col_pars.d:
if col_pars.d['OTHER'] != 'all':
other_cols = col_pars.d['OTHER']
if type(other_cols) == type((2, )):
other_cols = tuple(map(int, other_cols))
else:
other_cols = (int(other_cols), )
other_cols = [x - 1 for x in other_cols]
n_other = len(other_cols)
else:
n_other = get_2Darray(obs_file, cols='all', nrows=1).shape[1]
other_cols = list(range(n_other))
others = get_str(obs_file, other_cols)
if len(other_cols) > 1:
other = []
for j in range(len(others[0])):
lista = []
for i in range(len(others)):
lista.append(others[i][j])
other.append(join(lista))
else:
other = others
if pars.d['GET_Z'] == 'no': get_z = 0
else: get_z = 1
#Prepare the output file
out_name = pars.d['OUTPUT']
if get_z:
if os.path.exists(out_name):
os.system('cp %s %s.bak' % (out_name, out_name))
print("File %s exists. Copying it to %s.bak" % (out_name, out_name))
output = open(out_name, 'w')
if pars.d['PROBS_LITE'] == 'no': save_probs = 0
else: save_probs = 1
if pars.d['PROBS'] == 'no': save_full_probs = 0
else: save_full_probs = 1
if pars.d['PROBS2'] == 'no': save_probs2 = 0
else: save_probs2 = 1
#Include some header information
# File name and the date...
time_stamp = time.ctime(time.time())
if get_z: output.write('## File ' + out_name + ' ' + time_stamp + '\n')
#and also the parameters used to run bpz...
if get_z: output.write("""##
##Parameters used to run BPZ:
##
""")
claves = list(pars.d.keys())
claves.sort()
for key in claves:
if type(pars.d[key]) == type((1, )):
cosa = join(list(pars.d[key]), ',')
else:
cosa = str(pars.d[key])
if get_z: output.write('##' + key.upper() + '=' + cosa + '\n')
if save_full_probs:
#Shelve some info on the run
full_probs = shelve.open(pars.d['PROBS'])
full_probs['TIME'] = time_stamp
full_probs['PARS'] = pars.d
if save_probs:
probs = open(pars.d['PROBS_LITE'], 'w')
probs.write('# ID p_bayes(z) where z=arange(%.4f,%.4f,%.4f) \n' %
(zmin, zmax + dz, dz))
if save_probs2:
probs2 = open(pars.d['PROBS2'], 'w')
probs2.write(
'# id t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#probs2.write('# ID\n')
#probs2.write('# t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#Use an empirical prior?
tipo_prior = pars.d['PRIOR']
useprior = 0
if 'M_0' in col_pars.d:
has_mags = 1
else:
has_mags = 0
if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
useprior = 1
#Add cluster 'spikes' to the prior?
cluster_prior = 0.
if pars.d['ZC']:
cluster_prior = 1
if type(pars.d['ZC']) == type(""): zc = array([float(pars.d['ZC'])])
else: zc = array(list(map(float, pars.d['ZC'])))
if type(pars.d['FC']) == type(""): fc = array([float(pars.d['FC'])])
else: fc = array(list(map(float, pars.d['FC'])))
fcc = add.reduce(fc)
if fcc > 1.:
        print(fcc)
        raise ValueError('Too many galaxies in clusters!')
pi_c = zeros((nz, nt)) * 1.
#Go over the different cluster spikes
for i in range(len(zc)):
#We define the cluster within dz=0.01 limits
cluster_range = less_equal(abs(z - zc[i]), .01) * 1.
#Clip values to avoid overflow
exponente = clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
#Outside the cluster range g is 0
g = exp(exponente) * cluster_range
norm = add.reduce(g)
pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]
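        #(with sigma=0.00333 the +/-0.01 window above is roughly a 3-sigma cut
        # around each cluster redshift)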
#Go over the different types
print('We only apply the cluster prior to the early type galaxies')
for i in range(1, 3 + 2 * ninterp):
pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
#Output format
format = '%' + repr(int(maximum(5, len(id[0])))) + 's' #ID format
format = format + pars.d[
'N_PEAKS'] * ' %.3f %.3f %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'
#Add header with variable names to the output file
sxhdr = """##
##Column information
##
# 1 ID"""
k = 1
if pars.d['N_PEAKS'] > 1:
for j in range(pars.d['N_PEAKS']):
sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
k + 5, j + 1)
k += 5
else:
sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
k += 5
sxhdr += """
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)
nh = k + 4
if 'Z_S' in col_pars.d:
sxhdr = sxhdr + '# %i Z_S\n' % nh
format = format + ' %.3f'
nh += 1
if has_mags:
format = format + ' %.3f'
sxhdr = sxhdr + '# %i M_0\n' % nh
nh += 1
if 'OTHER' in col_pars.d:
sxhdr = sxhdr + '# %i OTHER\n' % nh
format = format + ' %s'
nh += n_other
#print sxhdr
if get_z: output.write(sxhdr + '##\n')
odds_i = float(pars.d['ODDS'])
oi = inv_gauss_int(odds_i)
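#Illustrative: assuming inv_gauss_int returns the half-width (in sigma) of the
#central Gaussian interval enclosing a fraction odds_i of the probability,
#odds_i=0.95 would give oi~1.96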
print(odds_i, oi)
#Proceed to redshift estimation
if checkSED: buffer_flux_comparison = ""
if pars.d['CONVOLVE_P'] == 'yes':
# Will Convolve with a dz=0.03 gaussian to make probabilities smoother
    # This is necessary; otherwise there are too many close peaks
sigma_g = 0.03
x = arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
dz) # made symmetric --DC
gaus = exp(-(old_div(x, sigma_g))**2)
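    # Note: the kernel is not normalized here; p_bayes is clipped and explicitly
    # renormalized after the convolution below, so only its relative shape matters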
if pars.d["NMAX"] != None: ng = int(pars.d["NMAX"])
for ig in range(ng):
currentPercent = ig / ng * 100
status = "{:.3f}% of {} completed.".format(currentPercent, ng)
Printer(status)
    #Don't run BPZ on galaxies that have z_s > z_max
#if col_pars.d.has_key('Z_S'):
# if z_s[ig]<9.9 and z_s[ig]>zmax : continue
if not get_z: continue
if pars.d['COLOR'] == 'yes':
likelihood = p_c_z_t_color(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
else:
likelihood = p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
if 0:
print(f_obs[ig, :nf])
print(ef_obs[ig, :nf])
iz_ml = likelihood.i_z_ml
t_ml = likelihood.i_t_ml
red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
#p=likelihood.Bayes_likelihood
#likelihood.various_plots()
#print 'FULL BAYESAIN LIKELIHOOD'
p = likelihood.likelihood
if not ig:
print('ML * prior -- NOT QUITE BAYESIAN')
if pars.d[
'ONLY_TYPE'] == 'yes': #Use only the redshift information, no priors
p_i = zeros((nz, nt)) * 1.
j = searchsorted(z, z_s[ig])
#print j,nt,z_s[ig]
try:
p_i[j, :] = old_div(1., float(nt))
except IndexError:
pass
else:
if useprior:
if pars.d['PRIOR'] == 'lensing':
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp, x[ig], y[ig])
else:
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp)
else:
p_i = old_div(ones((nz, nt), float), float(nz * nt))
if cluster_prior: p_i = (1. - fcc) * p_i + pi_c
if save_full_probs:
full_probs[id[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]
#Multiply the prior by the likelihood to find the final probability
pb = p_i[:nz, :nt] * p[:nz, :nt]
#plo=FramedPlot()
#for i in range(p.shape[1]):
# plo.add(Curve(z,p_i[:nz,i]/sum(sum(p_i[:nz,:]))))
#for i in range(p.shape[1]):
# plo.add(Curve(z,p[:nz,i]/sum(sum(p[:nz,:])),color='red'))
#plo.add(Curve(z,pb[:nz,-1]/sum(pb[:nz,-1]),color='blue'))
#plo.show()
#ask('More?')
#Convolve with a gaussian of width \sigma(1+z) to take into
    #account the intrinsic scatter in the redshift estimation 0.06*(1+z)
#(to be done)
#Estimate the bayesian quantities
p_bayes = add.reduce(pb[:nz, :nt], -1)
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
#Convolve with a gaussian
if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
#print 'GAUSS CONV'
p_bayes = convolve(p_bayes, gaus, 1)
#print 'gaus', gaus
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
# Eliminate all low level features in the prob. distribution
pmax = max(p_bayes)
p_bayes = where(
greater(p_bayes, pmax * float(pars.d['P_MIN'])), p_bayes, 0.)
norm = add.reduce(p_bayes)
p_bayes = old_div(p_bayes, norm)
if specprob:
p_spec[ig, :] = match_resol(z, p_bayes, z_spec[ig, :]) * p_spec[ig, :]
norma = add.reduce(p_spec[ig, :])
if norma == 0.: norma = 1.
p_spec[ig, :] /= norma
#vyjod=tuple([id[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
# int(float(other[ig]))])
vyjod = tuple([id[ig]] + list(z_spec[ig, :]) + list(p_spec[ig, :]))
formato = "%s " + 5 * " %.4f"
formato += 5 * " %.3f"
#formato+=" %4f %i"
formato += "\n"
print(formato % vyjod)
specout.write(formato % vyjod)
if pars.d['N_PEAKS'] > 1:
# Identify maxima and minima in the final probability
g_max = less(p_bayes[2:], p_bayes[1:-1]) * less(p_bayes[:-2],
p_bayes[1:-1])
g_min = greater(p_bayes[2:], p_bayes[1:-1]) * greater(p_bayes[:-2],
p_bayes[1:-1])
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[2:], 0.)
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[:-2], 0.)
i_max = compress(g_max, arange(nz - 2)) + 1
i_min = compress(g_min, arange(nz - 2)) + 1
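        # Illustrative example (made-up values): for p_bayes = [0,1,3,2,5,1] the
        # masks above flag interior maxima at indices 2 and 4 (values 3 and 5)
        # and an interior minimum at index 3; the +1 restores full-array indexing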
# Check that the first point and the last one are not minima or maxima,
# if they are, add them to the index arrays
if p_bayes[0] > p_bayes[1]:
i_max = concatenate([[0], i_max])
i_min = concatenate([[0], i_min])
if p_bayes[-1] > p_bayes[-2]:
i_max = concatenate([i_max, [nz - 1]])
i_min = concatenate([i_min, [nz - 1]])
if p_bayes[0] < p_bayes[1]:
i_min = concatenate([[0], i_min])
if p_bayes[-1] < p_bayes[-2]:
i_min = concatenate([i_min, [nz - 1]])
p_max = take(p_bayes, i_max)
#p_min=take(p_bayes,i_min)
p_tot = []
z_peaks = []
t_peaks = []
# Sort them by probability values
p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
# For each maximum, define the minima which sandwich it
# Assign minima to each maximum
jm = searchsorted(i_min, i_max)
p_max = list(p_max)
for i in range(len(i_max)):
z_peaks.append([z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
t_peaks.append(argmax(pb[i_max[i], :nt]))
p_tot.append(sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
# print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]
if ninterp:
t_peaks = list(old_div(array(t_peaks), (1. + ninterp)))
if pars.d['MERGE_PEAKS'] == 'yes':
# Merge peaks which are very close 0.03(1+z)
merged = []
for k in range(len(z_peaks)):
for j in range(len(z_peaks)):
if j > k and k not in merged and j not in merged:
if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
1. + z_peaks[j][0]):
# Modify the element which receives the accretion
z_peaks[k][1] = minimum(z_peaks[k][1],
z_peaks[j][1])
z_peaks[k][2] = maximum(z_peaks[k][2],
z_peaks[j][2])
p_tot[k] += p_tot[j]
# Put the merged element in the list
merged.append(j)
#print merged
# Clean up
copia = p_tot[:]
for j in merged:
p_tot.remove(copia[j])
copia = z_peaks[:]
for j in merged:
z_peaks.remove(copia[j])
copia = t_peaks[:]
for j in merged:
t_peaks.remove(copia[j])
copia = p_max[:]
for j in merged:
p_max.remove(copia[j])
if sum(array(p_tot)) != 1.:
p_tot = old_div(array(p_tot), sum(array(p_tot)))
# Define the peak
iz_b = argmax(p_bayes)
zb = z[iz_b]
# OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
# if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
# else: zb=zb-dz #This corrects another small bias --DC
#Integrate within a ~ oi*sigma interval to estimate
# the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
#Look for the number of sigma corresponding
#to the odds_i confidence limit
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if pars.d['Z_THR'] > 0:
zo1 = float(pars.d['Z_THR'])
zo2 = float(pars.d['ZMAX'])
o = odds(p_bayes[:nz], z, zo1, zo2)
# Integrate within the same odds interval to find the type
# izo1=maximum(0,searchsorted(z,zo1)-1)
# izo2=minimum(nz,searchsorted(z,zo2))
# t_b=argmax(add.reduce(p[izo1:izo2,:nt],0))
it_b = argmax(pb[iz_b, :nt])
t_b = it_b + 1
if ninterp:
tt_b = old_div(float(it_b), (1. + ninterp))
tt_ml = old_div(float(t_ml), (1. + ninterp))
else:
tt_b = it_b
tt_ml = t_ml
if max(pb[iz_b, :]) < 1e-300:
print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
t_b = -1.
tt_b = -1.
#print it_b, t_b, tt_b, pb.shape
if 0:
print(f_mod[iz_b, it_b, :nf])
print(min(ravel(p_i)), max(ravel(p_i)))
print(min(ravel(p)), max(ravel(p)))
print(p_i[iz_b, :])
print(p[iz_b, :])
print(p_i[iz_b, it_b]) # prior
print(p[iz_b, it_b]) # chisq
print(likelihood.likelihood[iz_b, it_b])
print(likelihood.chi2[iz_b, it_b])
print(likelihood.ftt[iz_b, it_b])
print(likelihood.foo)
print()
print('t_b', t_b)
print('iz_b', iz_b)
print('nt', nt)
print(max(ravel(pb)))
impb = argmax(ravel(pb))
impbz = old_div(impb, nt)
impbt = impb % nt
print(impb, impbz, impbt)
print(ravel(pb)[impb])
print(pb.shape, (nz, nt))
print(pb[impbz, impbt])
print(pb[iz_b, it_b])
print('z, t', z[impbz], t_b)
print(t_b)
# Redshift confidence limits
z1, z2 = interval(p_bayes[:nz], z, odds_i)
if pars.d['PHOTO_ERRORS'] == 'no':
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if zo1 < z1: z1 = maximum(0., zo1)
if zo2 > z2: z2 = zo2
# Print output
if pars.d['N_PEAKS'] == 1:
salida = [id[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1,
red_chi2]
else:
salida = [id[ig]]
for k in range(pars.d['N_PEAKS']):
if k <= len(p_tot) - 1:
salida = salida + list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
else:
salida += [-1., -1., -1., -1., -1.]
salida += [z[iz_ml], tt_ml + 1, red_chi2]
if 'Z_S' in col_pars.d: salida.append(z_s[ig])
if has_mags: salida.append(m_0[ig] - pars.d['DELTA_M_0'])
if 'OTHER' in col_pars.d: salida.append(other[ig])
if get_z: output.write(format % tuple(salida) + '\n')
if pars.d['VERBOSE'] == 'yes': print(format % tuple(salida))
#try:
# if sometrue(greater(z_peaks,7.5)):
# connect(z,p_bayes)
# ask('More?')
#except:
# pass
odd_check = odds_i
if checkSED:
ft = f_mod[iz_b, it_b, :]
fo = f_obs[ig, :]
efo = ef_obs[ig, :]
dfosq = (old_div((ft - fo), efo))**2
if 0:
print(ft)
print(fo)
print(efo)
print(dfosq)
pause()
factor = ft / efo / efo
ftt = add.reduce(ft * factor)
fot = add.reduce(fo * factor)
am = old_div(fot, ftt)
ft = ft * am
if 0:
print(factor)
print(ftt)
print(fot)
print(am)
print(ft)
print()
pause()
flux_comparison = [id[ig], m_0[ig], z[iz_b], t_b, am] + list(
concatenate([ft, fo, efo]))
nfc = len(flux_comparison)
format_fc = '%s %.2f %.2f %i' + (nfc - 4) * ' %.3e' + '\n'
buffer_flux_comparison = buffer_flux_comparison + format_fc % tuple(
flux_comparison)
if o >= odd_check:
# PHOTOMETRIC CALIBRATION CHECK
# Calculate flux ratios, but only for objects with ODDS >= odd_check
# (odd_check = 0.95 by default)
# otherwise, leave weight w = 0 by default
eps = 1e-10
frat[ig, :] = divsafe(fo, ft, inf=eps, nan=eps)
#fw[ig,:] = greater(fo, 0)
fw[ig, :] = divsafe(fo, efo, inf=1e8, nan=0)
fw[ig, :] = clip(fw[ig, :], 0, 100)
#print fw[ig,:]
#print
if 0:
bad = less_equal(ft, 0.)
#Avoid overflow by setting r to 0.
fo = where(bad, 0., fo)
ft = where(bad, 1., ft)
r[ig, :] = old_div(fo, ft)
try:
dm[ig, :] = -flux2mag(old_div(fo, ft))
except:
dm[ig, :] = -100
# Clip ratio between 0.01 & 100
r[ig, :] = where(greater(r[ig, :], 100.), 100., r[ig, :])
r[ig, :] = where(less_equal(r[ig, :], 0.), 0.01, r[ig, :])
#Weight by flux
w[ig, :] = where(greater(fo, 0.), 1, 0.)
#w[ig,:]=where(greater(fo,0.),fo,0.)
#print fo
#print r[ig,:]
#print
                # This is no good because r is always > 0 (has been clipped that way)
#w[ig,:]=where(greater(r[ig,:],0.),fo,0.)
                # This is bad because it would include non-detections:
#w[ig,:]=where(greater(r[ig,:],0.),1.,0.)
if save_probs:
texto = '%s ' % str(id[ig])
texto += len(p_bayes) * '%.3e ' + '\n'
probs.write(texto % tuple(p_bayes))
# pb[z,t] -> p_bayes[z]
# 1. tb are summed over
# 2. convolved with Gaussian if CONVOLVE_P
# 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
# 4. normalized such that sum(P(z)) = 1
if save_probs2: # P = exp(-chisq / 2)
#probs2.write('%s\n' % id[ig])
pmin = pmax * float(pars.d['P_MIN'])
#pb = where(less(pb,pmin), 0, pb)
chisq = -2 * log(pb)
for itb in range(nt):
chisqtb = chisq[:, itb]
pqual = greater(pb[:, itb], pmin)
chisqlists = seglist(chisqtb, pqual)
if len(chisqlists) == 0:
continue
#print pb[:,itb]
#print chisqlists
zz = arange(zmin, zmax + dz, dz)
zlists = seglist(zz, pqual)
for i in range(len(zlists)):
probs2.write('%s %2d %.3f ' %
(id[ig], itb + 1, zlists[i][0]))
fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
probs2.write(fmt % tuple(chisqlists[i]))
#fmt = len(chisqtb) * '%4.2f '+'\n'
#probs2.write('%d ' % itb)
#probs2.write(fmt % tuple(chisqtb))
#if checkSED: open(pars.d['FLUX_COMPARISON'],'w').write(buffer_flux_comparison)
if checkSED: open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)
if get_z: output.close()
#if checkSED and get_z:
if checkSED:
#try:
if 1:
if interactive:
print("")
print("")
print("PHOTOMETRIC CALIBRATION TESTS")
# See PHOTOMETRIC CALIBRATION CHECK above
#ratios=add.reduce(w*r,0)/add.reduce(w,0)
#print "Average, weighted by flux ratios f_obs/f_model for objects with odds >= %g" % odd_check
#print len(filters)*' %s' % tuple(filters)
#print nf*' % 7.3f ' % tuple(ratios)
#print "Corresponding zero point shifts"
#print nf*' % 7.3f ' % tuple(-flux2mag(ratios))
#print
fratavg = old_div(sum(fw * frat, axis=0), sum(fw, axis=0))
dmavg = -flux2mag(fratavg)
fnobj = sum(greater(fw, 0), axis=0)
#print 'fratavg', fratavg
#print 'dmavg', dmavg
#print 'fnobj', fnobj
#fnobj = sum(greater(w[:,i],0))
print(
"If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ.")
print(
"(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)")
print()
print(' fo/ft dmag nobj filter')
#print nf
for i in range(nf):
print('% 7.3f % 7.3f %5d %s'\
% (fratavg[i], dmavg[i], fnobj[i], filters[i]))
#% (ratios[i], -flux2mag(ratios)[i], sum(greater(w[:,i],0)), filters[i])
#print ' fo/ft dmag filter'
#for i in range(nf):
# print '% 7.3f % 7.3f %s' % (ratios[i], -flux2mag(ratios)[i], filters[i])
print(
"fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
% odd_check)
print(
"dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)")
print(
"nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
% odd_check)
# print r
# print w
#print
#print "Number of galaxies considered (with ODDS >= %g):" % odd_check
#print ' ', sum(greater(w,0)) / float(nf)
#print '(Note a galaxy detected in only 5 / 6 filters counts as 5/6 = 0.833)'
#print sum(greater(w,0))
#This part is experimental and may not work in the general case
#print "Median color offsets for objects with odds > "+`odd_check`+" (not weighted)"
#print len(filters)*' %s' % tuple(filters)
#r=flux2mag(r)
#print nf*' %.3f ' % tuple(-median(r))
#print nf*' %.3f ' % tuple(median(dm))
#rms=[]
#efobs=[]
#for j in range(nf):
# ee=where(greater(f_obs[:,j],0.),f_obs[:,j],2.)
# zz=e_frac2mag(ef_obs[:,j]/ee)
#
# xer=arange(0.,1.,.02)
# hr=hist(abs(r[:,j]),xer)
# hee=hist(zz,xer)
# rms.append(std_log(compress(less_equal(r[:,j],1.),r[:,j])))
# zz=compress(less_equal(zz,1.),zz)
# efobs.append(sqrt(mean(zz*zz)))
#print nf*' %.3f ' % tuple(rms)
#print nf*' %.3f ' % tuple(efobs)
#print nf*' %.3f ' % tuple(sqrt(abs(array(rms)**2-array(efobs)**2)))
#except: pass
if save_full_probs: full_probs.close()
if save_probs: probs.close()
if save_probs2: probs2.close()
if plots and checkSED:
zb, zm, zb1, zb2, o, tb = get_data(out_name, (1, 6, 2, 3, 5, 4))
#Plot the comparison between z_spec and z_B
if 'Z_S' in col_pars.d:
if not interactive or ask('Compare z_B vs z_spec?'):
good = less(z_s, 9.99)
print(
'Total initial number of objects with spectroscopic redshifts= ',
sum(good))
od_th = 0.
if ask('Select for galaxy characteristics?\n'):
od_th = eval(input('Odds threshold?\n'))
good *= greater_equal(o, od_th)
t_min = eval(input('Minimum spectral type\n'))
t_max = eval(input('Maximum spectral type\n'))
good *= less_equal(tb, t_max) * greater_equal(tb, t_min)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(
m_0, mg_min)
zmo, zso, zbo, zb1o, zb2o, tb = multicompress(good, (zm, z_s, zb,
zb1, zb2, tb))
print('Number of objects with odds > %.2f= %i ' %
(od_th, len(zbo)))
deltaz = old_div((zso - zbo), (1. + zso))
sz = stat_robust(deltaz, 3., 3)
sz.run()
outliers = greater_equal(abs(deltaz), 3. * sz.rms)
print('Number of outliers [dz >%.2f*(1+z)]=%i' %
(3. * sz.rms, add.reduce(outliers)))
catastrophic = greater_equal(deltaz * (1. + zso), 1.)
n_catast = sum(catastrophic)
print('Number of catastrophic outliers [dz >1]=', n_catast)
print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
if interactive and plots:
if plots == 'pylab':
figure(2)
subplot(211)
plot(
arange(
min(zso), max(zso) + 0.01, 0.01), arange(
min(zso), max(zso) + 0.01, 0.01), "r")
errorbar(zso,
zbo, [abs(zbo - zb1o), abs(zb2o - zbo)],
fmt="bo")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{bpz}$')
subplot(212)
plot(zso, zmo, "go", zso, zso, "r")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{ML}$')
show()
elif plots == 'biggles':
plot = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot.add(Points(zso, zbo, symboltype=symbol, color='blue'))
plot.add(Curve(zso, zso, linewidth=2., color='red'))
plot.add(ErrorBarsY(zso, zb1o, zb2o))
plot.xlabel = r'$z_{spec}$'
plot.ylabel = r'$z_{bpz}$'
# plot.xrange=0.,1.5
# plot.yrange=0.,1.5
plot.show()
#
plot_ml = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot_ml.add(Points(
zso, zmo, symboltype=symbol,
color='blue'))
plot_ml.add(Curve(zso, zso, linewidth=2., color='red'))
plot_ml.xlabel = r"$z_{spec}$"
plot_ml.ylabel = r"$z_{ML}$"
plot_ml.show()
if interactive and plots and ask('Plot Bayesian photo-z histogram?'):
if plots == 'biggles':
dz = eval(input('Redshift interval?\n'))
od_th = eval(input('Odds threshold?\n'))
good = greater_equal(o, od_th)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(m_0,
mg_min)
z = compress(good, zb)
xz = arange(zmin, zmax, dz)
hz = hist(z, xz)
plot = FramedPlot()
h = Histogram(hz, 0., dz, color='blue')
plot.add(h)
plot.xlabel = r'$z_{bpz}$'
plot.ylabel = r'$N(z_{bpz})$'
plot.show()
if ask('Want to save plot as eps file?'):
            file = input('File name?\n')
if file[-2:] != 'ps': file = file + '.eps'
plot.save_as_eps(file)
if interactive and plots and ask(
'Compare colors with photometric redshifts?'):
if plots == 'biggles':
color_m = zeros((nz, nt, nf - 1)) * 1.
for i in range(nf - 1):
plot = FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (zb, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Curve(zz, colour, color='red')
plot.add(d)
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
rolex.check()
| mit |
kylerbrown/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
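    # Worked example (same numbers as the first test case below):
    # n_features=11, n_features_to_select=3, step=2 gives
    # formula1 = 1 + (9 // 2) = 5 and formula2 = 1 + ceil(8 / 2) = 5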
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
larsmans/scipy | scipy/stats/_discrete_distns.py | 6 | 21338 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
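# Illustrative check of the pmf above: binom.pmf(2, 5, 0.5) should equal
# choose(5, 2) * 0.5**2 * 0.5**3 = 10 * 0.03125 = 0.3125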
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
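        # (d*d - 1) / 12 is the variance of a uniform variable on d consecutive integers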
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |