# ---- file: sklearn/metrics/_plot/precision_recall_curve.py ----
# repo: vinayak-mehta/scikit-learn | license: bsd-3-clause
from sklearn.base import is_classifier
from .base import _get_response
from .. import average_precision_score
from .. import precision_recall_curve
from .._base import _check_pos_label_consistency
from .._classification import check_consistent_length
from ...utils import check_matplotlib_support
class PrecisionRecallDisplay:
"""Precision Recall visualization.
    It is recommended to use
:func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator` or
:func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions` to create
    a :class:`~sklearn.metrics.PrecisionRecallDisplay`. All parameters are
stored as attributes.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
precision : ndarray
Precision values.
recall : ndarray
Recall values.
average_precision : float, default=None
Average precision. If None, the average precision is not shown.
estimator_name : str, default=None
Name of estimator. If None, then the estimator name is not shown.
pos_label : str or int, default=None
The class considered as the positive class. If None, the class will not
be shown in the legend.
.. versionadded:: 0.24
Attributes
----------
line_ : matplotlib Artist
Precision recall curve.
ax_ : matplotlib Axes
Axes with precision recall curve.
figure_ : matplotlib Figure
Figure containing the curve.
See Also
--------
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given
a binary classifier.
PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve
using predictions from a binary classifier.
Notes
-----
    The average precision (cf. :func:`~sklearn.metrics.average_precision_score`) in
scikit-learn is computed without any interpolation. To be consistent with
this metric, the precision-recall curve is plotted without any
interpolation as well (step-wise style).
You can change this style by passing the keyword argument
`drawstyle="default"` in :meth:`plot`, :meth:`from_estimator`, or
:meth:`from_predictions`. However, the curve will not be strictly
consistent with the reported average precision.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import (precision_recall_curve,
... PrecisionRecallDisplay)
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> clf = SVC(random_state=0)
>>> clf.fit(X_train, y_train)
SVC(random_state=0)
>>> predictions = clf.predict(X_test)
>>> precision, recall, _ = precision_recall_curve(y_test, predictions)
>>> disp = PrecisionRecallDisplay(precision=precision, recall=recall)
>>> disp.plot()
<...>
>>> plt.show()
"""
def __init__(
self,
precision,
recall,
*,
average_precision=None,
estimator_name=None,
pos_label=None,
):
self.estimator_name = estimator_name
self.precision = precision
self.recall = recall
self.average_precision = average_precision
self.pos_label = pos_label
def plot(self, ax=None, *, name=None, **kwargs):
"""Plot visualization.
Extra keyword arguments will be passed to matplotlib's `plot`.
Parameters
----------
ax : Matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
name : str, default=None
Name of precision recall curve for labeling. If `None`, use
`estimator_name` if not `None`, otherwise no labeling is shown.
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
Object that stores computed values.
Notes
-----
        The average precision (cf. :func:`~sklearn.metrics.average_precision_score`)
in scikit-learn is computed without any interpolation. To be consistent
with this metric, the precision-recall curve is plotted without any
interpolation as well (step-wise style).
You can change this style by passing the keyword argument
`drawstyle="default"`. However, the curve will not be strictly
consistent with the reported average precision.
"""
check_matplotlib_support("PrecisionRecallDisplay.plot")
name = self.estimator_name if name is None else name
line_kwargs = {"drawstyle": "steps-post"}
if self.average_precision is not None and name is not None:
line_kwargs["label"] = f"{name} (AP = {self.average_precision:0.2f})"
elif self.average_precision is not None:
line_kwargs["label"] = f"AP = {self.average_precision:0.2f}"
elif name is not None:
line_kwargs["label"] = name
line_kwargs.update(**kwargs)
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots()
(self.line_,) = ax.plot(self.recall, self.precision, **line_kwargs)
info_pos_label = (
f" (Positive label: {self.pos_label})" if self.pos_label is not None else ""
)
xlabel = "Recall" + info_pos_label
ylabel = "Precision" + info_pos_label
ax.set(xlabel=xlabel, ylabel=ylabel)
if "label" in line_kwargs:
ax.legend(loc="lower left")
self.ax_ = ax
self.figure_ = ax.figure
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
sample_weight=None,
pos_label=None,
response_method="auto",
name=None,
ax=None,
**kwargs,
):
"""Plot precision-recall curve given an estimator and some data.
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : str or int, default=None
The class considered as the positive class when computing the
            precision and recall metrics. By default, `estimator.classes_[1]`
is considered as the positive class.
response_method : {'predict_proba', 'decision_function', 'auto'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
name : str, default=None
Name for labeling curve. If `None`, no name is used.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
See Also
--------
PrecisionRecallDisplay.from_predictions : Plot precision-recall curve
using estimated probabilities or output of decision function.
Notes
-----
        The average precision (cf. :func:`~sklearn.metrics.average_precision_score`)
in scikit-learn is computed without any interpolation. To be consistent
with this metric, the precision-recall curve is plotted without any
interpolation as well (step-wise style).
You can change this style by passing the keyword argument
`drawstyle="default"`. However, the curve will not be strictly
consistent with the reported average precision.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import PrecisionRecallDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression()
>>> clf.fit(X_train, y_train)
LogisticRegression()
>>> PrecisionRecallDisplay.from_estimator(
... clf, X_test, y_test)
<...>
>>> plt.show()
"""
method_name = f"{cls.__name__}.from_estimator"
check_matplotlib_support(method_name)
if not is_classifier(estimator):
raise ValueError(f"{method_name} only supports classifiers")
y_pred, pos_label = _get_response(
X,
estimator,
response_method,
pos_label=pos_label,
)
name = name if name is not None else estimator.__class__.__name__
return cls.from_predictions(
y,
y_pred,
sample_weight=sample_weight,
name=name,
pos_label=pos_label,
ax=ax,
**kwargs,
)
@classmethod
def from_predictions(
cls,
y_true,
y_pred,
*,
sample_weight=None,
pos_label=None,
name=None,
ax=None,
**kwargs,
):
"""Plot precision-recall curve given binary class predictions.
Parameters
----------
y_true : array-like of shape (n_samples,)
True binary labels.
y_pred : array-like of shape (n_samples,)
Estimated probabilities or output of decision function.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
pos_label : str or int, default=None
The class considered as the positive class when computing the
precision and recall metrics.
name : str, default=None
Name for labeling curve. If `None`, name will be set to
`"Classifier"`.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is created.
**kwargs : dict
Keyword arguments to be passed to matplotlib's `plot`.
Returns
-------
display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
See Also
--------
PrecisionRecallDisplay.from_estimator : Plot precision-recall curve
using an estimator.
Notes
-----
        The average precision (cf. :func:`~sklearn.metrics.average_precision_score`)
in scikit-learn is computed without any interpolation. To be consistent
with this metric, the precision-recall curve is plotted without any
interpolation as well (step-wise style).
You can change this style by passing the keyword argument
`drawstyle="default"`. However, the curve will not be strictly
consistent with the reported average precision.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import PrecisionRecallDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression()
>>> clf.fit(X_train, y_train)
LogisticRegression()
>>> y_pred = clf.predict_proba(X_test)[:, 1]
>>> PrecisionRecallDisplay.from_predictions(
... y_test, y_pred)
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_predictions")
check_consistent_length(y_true, y_pred, sample_weight)
pos_label = _check_pos_label_consistency(pos_label, y_true)
precision, recall, _ = precision_recall_curve(
y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight
)
average_precision = average_precision_score(
y_true, y_pred, pos_label=pos_label, sample_weight=sample_weight
)
name = name if name is not None else "Classifier"
viz = PrecisionRecallDisplay(
precision=precision,
recall=recall,
average_precision=average_precision,
estimator_name=name,
pos_label=pos_label,
)
return viz.plot(ax=ax, name=name, **kwargs)
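
# Illustrative usage sketch (editor-added, not part of the library source):
# it demonstrates the `drawstyle="default"` override discussed in the Notes
# sections above. Only standard scikit-learn and matplotlib public APIs are
# used; the snippet itself is an assumption-free demo, not library code.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import PrecisionRecallDisplay
    from sklearn.model_selection import train_test_split

    X, y = make_classification(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression().fit(X_train, y_train)
    # Passing drawstyle through **kwargs replaces the step-wise style with a
    # linearly interpolated curve (no longer strictly consistent with AP).
    PrecisionRecallDisplay.from_estimator(clf, X_test, y_test, drawstyle="default")
    plt.show()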

# ---- file: examples/text/mlcomp_sparse_document_classification.py ----
# repo: Adai0808/scikit-learn | license: bsd-3-clause
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()

# ---- file: simple-pca/fa_vs_pca.py ----
# repo: oshadura/GVgenetic | license: gpl-2.0
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
# NOTE: this script targets the pre-0.18 scikit-learn API; in modern
# releases these utilities live in sklearn.model_selection.
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()

# ---- file: python/test/machine_learning/average_models.py ----
# repo: sangwook236/SWL | license: gpl-3.0
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import torch
def save_model(model_filepath, model, logger=None):
#torch.save(model.state_dict(), model_filepath)
torch.save({'state_dict': model.state_dict()}, model_filepath)
if logger: logger.info('Saved a model to {}.'.format(model_filepath))
def load_model(model_filepath, model, device='cpu'):
loaded_data = torch.load(model_filepath, map_location=device)
#model.load_state_dict(loaded_data)
model.load_state_dict(loaded_data['state_dict'])
print('Loaded a model from {}.'.format(model_filepath))
return model
def build_model():
# TODO [implement] >>
raise NotImplementedError
# Model averaging:
# The paper averages the last k checkpoints to create an ensembling effect.
def average_models(model, models):
	# Copy the element-wise average of the source models' parameters into `model`.
	with torch.no_grad():
		for ps in zip(*[mdl.parameters() for mdl in [model] + models]):
			ps[0].copy_(torch.stack(ps[1:]).mean(dim=0))
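
# Minimal, self-contained sanity check (editor-added, not in the original
# file): averaging two identical models must reproduce their parameters.
def _sanity_check_average_models():
	import torch.nn as nn
	src_a, src_b = nn.Linear(4, 2), nn.Linear(4, 2)
	src_b.load_state_dict(src_a.state_dict())  # Make both sources identical.
	dst = nn.Linear(4, 2)
	average_models(dst, [src_a, src_b])
	assert torch.allclose(dst.weight, src_a.weight)
	assert torch.allclose(dst.bias, src_a.bias)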
def simple_model_averaging_example():
gpu = -1
device = torch.device(('cuda:{}'.format(gpu) if gpu >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
model_filepaths = [
'./model_01.pth',
'./model_02.pth'
]
averaged_model_filepath = './averaged_model.pth'
models = list()
for mdl_fpath in model_filepaths:
mdl = build_model()
models.append(load_model(mdl_fpath, mdl, device))
averaged_model = build_model() # Averaged model.
average_models(averaged_model, models)
save_model(averaged_model_filepath, averaged_model)
	inputs = None  # Placeholder: replace with a real input batch tensor before use.
	predictions = averaged_model(inputs)
def average_predictions(models, inputs):
	# Detach and move to CPU so the model outputs convert cleanly to NumPy.
	predictions = [mdl(inputs).detach().cpu().numpy() for mdl in models]
	predictions = np.array(predictions)
return np.average(predictions, axis=0)
#return np.sum(predictions, axis=0)
def simple_prediction_averaging_example():
gpu = -1
device = torch.device(('cuda:{}'.format(gpu) if gpu >= 0 else 'cuda') if torch.cuda.is_available() else 'cpu')
model_filepaths = [
'./model_01.pth',
'./model_02.pth'
]
models = list()
for mdl_fpath in model_filepaths:
mdl = build_model()
models.append(load_model(mdl_fpath, mdl, device))
	inputs = None  # Placeholder: replace with a real input batch tensor before use.
predictions = average_predictions(models, inputs)
def main():
simple_model_averaging_example()
simple_prediction_averaging_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()

# ---- file: pylearn2/training_algorithms/tests/test_default.py ----
# repo: lisa-lab/pylearn2 | license: bsd-3-clause
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models.rbm import RBM
from pylearn2.models.s3c import S3C, E_Step, Grad_M_Step
from pylearn2.training_algorithms.default import DefaultTrainingAlgorithm
from pylearn2.training_algorithms.training_algorithm import NoBatchSizeError
def test_multiple_monitoring_datasets():
# tests that DefaultTrainingAlgorithm can take multiple
# monitoring datasets.
BATCH_SIZE = 1
BATCHES = 3
dim = 4
m = 10
rng = np.random.RandomState([2014, 2, 25])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
train = DenseDesignMatrix(X=X)
test = DenseDesignMatrix(X=Y)
algorithm = DefaultTrainingAlgorithm(
batch_size=BATCH_SIZE,
batches_per_iter=BATCHES,
monitoring_dataset={'train': train, 'test': test})
model = S3C(nvis=dim, nhid=1,
irange=.01, init_bias_hid=0., init_B=1.,
min_B=1., max_B=1., init_alpha=1.,
min_alpha=1., max_alpha=1., init_mu=0.,
m_step=Grad_M_Step(learning_rate=0.),
e_step=E_Step(h_new_coeff_schedule=[1.]))
algorithm.setup(model=model, dataset=train)
algorithm.train(dataset=train)
def test_unspecified_batch_size():
# Test that failing to specify the batch size results in a
# NoBatchSizeError
m = 1
dim = 2
rng = np.random.RandomState([2014, 3, 17])
X = rng.randn(m, dim)
train = DenseDesignMatrix(X=X)
rbm = RBM(nvis=dim, nhid=3)
trainer = DefaultTrainingAlgorithm()
try:
trainer.setup(rbm, train)
except NoBatchSizeError:
return
raise AssertionError("Missed the lack of a batch size")
if __name__ == '__main__':
test_multiple_monitoring_datasets()

# ---- file: tensorflow/python/keras/_impl/keras/engine/training.py ----
# repo: ychfan/tensorflow | license: apache-2.0
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras training and evaluation routines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import callbacks as cbks
from tensorflow.python.keras._impl.keras import losses
from tensorflow.python.keras._impl.keras import metrics as metrics_module
from tensorflow.python.keras._impl.keras import optimizers
from tensorflow.python.keras._impl.keras.engine.topology import Container
from tensorflow.python.keras._impl.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
def _standardize_input_data(data,
names,
shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Arguments:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exception_prefix: String prefix used for exception formatting.
Returns:
List of standardized input arrays (one array per model input).
Raises:
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
if data is not None and hasattr(data, '__len__') and len(data):
raise ValueError('Error when checking model ' + exception_prefix + ': '
'expected no data, but got:', data)
return []
if data is None:
return [None for _ in range(len(names))]
if isinstance(data, dict):
arrays = []
for name in names:
if name not in data:
raise ValueError('No data provided for "' + name +
'". Need data for each key in: ' + str(names))
arrays.append(data[name])
elif isinstance(data, list):
if len(data) != len(names):
if data and hasattr(data[0], 'shape'):
raise ValueError(
'Error when checking model ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) + ' array(s), but instead got '
'the following list of ' + str(len(data)) + ' arrays: ' +
str(data)[:200] + '...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise ValueError('Error when checking model ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' + str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise TypeError('Error when checking model ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) > 1:
# Case: model expects multiple inputs but only received
# a single Numpy array.
raise ValueError('The model expects ' + str(len(names)) + ' ' +
exception_prefix +
' arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# Make arrays at least 2D.
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# Check shapes compatibility.
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise ValueError(
'Error when checking ' + exception_prefix + ': expected ' + names[i]
+ ' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' + str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_axis:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise ValueError('Error when checking ' + exception_prefix +
': expected ' + names[i] + ' to have shape ' +
str(shapes[i]) + ' but got array with shape ' +
str(array.shape))
return arrays
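
# Illustrative sketch (editor-added, not in the original module), assuming a
# model with two named inputs 'a' and 'b':
#
#   _standardize_input_data({'a': arr_a, 'b': arr_b}, names=['a', 'b'])
#   # -> [arr_a, arr_b], ordered to match `names` and made at least 2D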
def _standardize_sample_or_class_weights(x_weight, output_names, weight_type):
"""Maps `sample_weight` or `class_weight` to model outputs.
Arguments:
x_weight: User-provided `sample_weight` or `class_weight` argument.
output_names: List of output names (strings) in the model.
weight_type: A string used purely for exception printing.
Returns:
A list of `sample_weight` or `class_weight` where there are exactly
one element per model output.
Raises:
ValueError: In case of invalid user-provided argument.
"""
if x_weight is None or len(x_weight) == 0: # pylint: disable=g-explicit-length-test
return [None for _ in output_names]
if len(output_names) == 1:
if isinstance(x_weight, list) and len(x_weight) == 1:
return x_weight
if isinstance(x_weight, dict) and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if isinstance(x_weight, list):
if len(x_weight) != len(output_names):
raise ValueError('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) + ' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
                       'You should provide one `' + weight_type + '` '
'array per model output.')
return x_weight
if isinstance(x_weight, dict):
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise TypeError('The model has multiple outputs, so `' + weight_type + '` '
                    'should be either a list or a dict. '
'Provided `' + weight_type + '` type not understood: ' +
str(x_weight))
def _standardize_class_weights(class_weight, output_names):
return _standardize_sample_or_class_weights(class_weight, output_names,
'class_weight')
def _standardize_sample_weights(sample_weight, output_names):
return _standardize_sample_or_class_weights(sample_weight, output_names,
'sample_weight')
def _check_array_lengths(inputs, targets, weights=None):
"""Does user input validation for numpy arrays.
Arguments:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data.
"""
def set_of_lengths(x):
# return a set with the variation between
# different shapes, with None => 0
if x is None:
return {0}
else:
return set([0 if y is None else y.shape[0] for y in x])
set_x = set_of_lengths(inputs)
set_y = set_of_lengths(targets)
set_w = set_of_lengths(weights)
if len(set_x) > 1:
raise ValueError('All input arrays (x) should have '
'the same number of samples. Got array shapes: ' + str(
[x.shape for x in inputs]))
if len(set_y) > 1:
raise ValueError('All target arrays (y) should have '
'the same number of samples. Got array shapes: ' + str(
[y.shape for y in targets]))
if set_x and set_y and list(set_x)[0] != list(set_y)[0]:
raise ValueError('Input arrays should have '
'the same number of samples as target arrays. '
'Found ' + str(list(set_x)[0]) + ' input samples '
'and ' + str(list(set_y)[0]) + ' target samples.')
if len(set_w) > 1:
raise ValueError('All sample_weight arrays should have '
'the same number of samples. Got array shapes: ' + str(
[w.shape for w in weights]))
if set_y and set_w and list(set_y)[0] != list(set_w)[0]:
raise ValueError('Sample_weight arrays should have '
'the same number of samples as target arrays. Got ' +
str(list(set_y)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly.
Arguments:
targets: list of Numpy arrays of targets.
loss_fns: list of loss functions.
output_shapes: list of shapes of model outputs.
Raises:
ValueError: if a loss function or target array
is incompatible with an output.
"""
key_losses = {
'mean_squared_error', 'binary_crossentropy', 'categorical_crossentropy'
}
for y, loss, shape in zip(targets, loss_fns, output_shapes):
if loss is None:
continue
if loss.__name__ == 'categorical_crossentropy':
if y.shape[-1] == 1:
raise ValueError('You are passing a target array of shape ' + str(
y.shape) + ' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses:
for target_dim, out_dim in zip(y.shape[1:], shape[1:]):
if out_dim is not None and target_dim != out_dim:
raise ValueError('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def _collect_metrics(metrics, output_names):
"""Maps metric functions to model outputs.
Arguments:
metrics: a list or dict of metric functions.
output_names: a list of the names (strings) of model outputs.
Returns:
A list (one entry per model output) of lists of metric functions.
For instance, if the model has 2 outputs, and for the first output
we want to compute "binary_accuracy" and "binary_crossentropy",
and just "binary_accuracy" for the second output,
the list would look like:
`[[binary_accuracy, binary_crossentropy], [binary_accuracy]]`
Raises:
TypeError: if an incorrect type is passed for the `metrics` argument.
"""
if not metrics:
return [[] for _ in output_names]
if isinstance(metrics, list):
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif isinstance(metrics, dict):
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if not isinstance(output_metrics, list):
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise TypeError('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' + str(metrics))
def _batch_shuffle(index_array, batch_size):
"""Shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
Arguments:
index_array: array of indices to be shuffled.
batch_size: integer.
Returns:
The `index_array` array, shuffled in a batch-wise fashion.
"""
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
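
# Illustrative sketch (editor-added, not in the original module): with
# index_array=[0, 1, 2, 3, 4] and batch_size=2, the pairs (0, 1) and (2, 3)
# are shuffled as units and the leftover [4] is re-appended at the end.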
def _make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
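
# Illustrative sketch (editor-added, not in the original module):
#   _make_batches(10, 4)  # -> [(0, 4), (4, 8), (8, 10)]
# i.e. the last tuple is clipped to the total data size.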
def _slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `_slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
"""
if arrays is None:
return [None]
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
else:
return [None if x is None else x[start:stop] for x in arrays]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
elif hasattr(start, '__getitem__'):
return arrays[start:stop]
else:
return [None]
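
# Illustrative sketch (editor-added, not in the original module):
#   _slice_arrays([x, y], 0, 32)  # -> [x[0:32], y[0:32]]
#   _slice_arrays(x, [3, 1, 4])   # -> x[[3, 1, 4]] (fancy indexing by list)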
def _weighted_masked_objective(fn):
"""Adds support for masking and sample-weighting to an objective function.
It transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
Arguments:
fn: The objective function to wrap,
with signature `fn(y_true, y_pred)`.
Returns:
A function with signature `fn(y_true, y_pred, weights, mask)`.
"""
if fn is None:
return None
def weighted(y_true, y_pred, weights, mask=None):
"""Wrapper function.
Arguments:
y_true: `y_true` argument of `fn`.
y_pred: `y_pred` argument of `fn`.
weights: Weights tensor.
mask: Mask tensor.
Returns:
Scalar tensor.
"""
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# apply sample weighting
if weights is not None:
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
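
# Illustrative sketch (editor-added, not in the original module): wrapping a
# plain objective adds weighting/masking arguments, e.g.
#   weighted_mse = _weighted_masked_objective(losses.mean_squared_error)
#   loss_tensor = weighted_mse(y_true, y_pred, weights, mask=None)
# which yields a scalar tensor (the weighted mean of per-sample losses).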
def _standardize_weights(y,
sample_weight=None,
class_weight=None,
sample_weight_mode=None):
"""Performs sample weight validation and standardization.
Everything gets normalized to a single sample-wise (or timestep-wise)
weight array.
Arguments:
y: Numpy array of model targets to be weighted.
sample_weight: User-provided `sample_weight` argument.
class_weight: User-provided `class_weight` argument.
sample_weight_mode: One of `None` or `"temporal"`.
`"temporal"` indicated that we expect 2D weight data
that will be applied to the last 2 dimensions of
the targets (i.e. we are weighting timesteps, not samples).
Returns:
A numpy array of target weights, one entry per sample to weight.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise ValueError('Found a sample_weight array for '
'an input with shape ' + str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify '
'sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
      raise ValueError('Found a sample_weight with shape ' +
                       str(sample_weight.shape) + '. '
'Expected sample_weight with rank '
'less than or equal to ' + str(len(y.shape)))
if y.shape[:sample_weight.ndim] != sample_weight.shape:
raise ValueError('Found a sample_weight array with shape ' +
str(sample_weight.shape) + ' for an input with shape ' +
str(y.shape) + '. '
'sample_weight cannot be broadcast.')
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise ValueError('`class_weight` not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray(
[class_weight[cls] for cls in y_classes if cls in class_weight])
if len(weights) != len(y_classes):
# subtract the sets to pick all missing classes
existing_classes = set(y_classes)
existing_class_weight = set(class_weight.keys())
raise ValueError('`class_weight` must contain all classes in the data.'
' The classes %s exist in the data but not in '
'`class_weight`.' %
(existing_classes - existing_class_weight))
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
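
# Illustrative sketch (editor-added, not in the original module): with one-hot
# targets for classes [1, 0, 1] and class_weight={0: 1., 1: 2.}, the returned
# per-sample weight array is [2., 1., 2.].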
class Model(Container):
"""The `Model` class adds training & evaluation routines to a `Container`.
"""
def compile(self,
optimizer,
loss,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
        to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
**kwargs: When using the Theano/CNTK backends, these arguments
are passed into K.function. When using the TensorFlow backend,
these arguments are passed into `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
loss = loss or {}
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name + '" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name + '" during training.')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
                       'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
                      str(loss_weights) + ' - expected a list or a dict.')
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors is not None:
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError('When passing a list as `target_tensors`, '
                           'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' +
str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError('Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
target_tensors_ = []
for name in self.output_names:
target_tensors_.append(target_tensors.get(name, None))
target_tensors = target_tensors_
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = self.internal_output_shapes[i]
name = self.output_names[i]
if target_tensors is not None:
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
                         'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' + str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(
K.placeholder(ndim=2, name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(
K.placeholder(ndim=1, name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
# Prepare metrics.
self.metrics = metrics
self.weighted_metrics = weighted_metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# List of same size as output_names.
# contains tuples (metrics for output, names of metrics).
nested_metrics = _collect_metrics(metrics, self.output_names)
nested_weighted_metrics = _collect_metrics(weighted_metrics,
self.output_names)
def append_metric(layer_index, metric_name, metric_tensor):
"""Helper function used in loop below."""
if len(self.output_names) > 1:
metric_name = self.output_names[layer_index] + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weights = sample_weights[i]
output_metrics = nested_metrics[i]
output_weighted_metrics = nested_weighted_metrics[i]
def handle_metrics(metrics, weights=None):
metric_name_prefix = 'weighted_' if weights is not None else ''
for metric in metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy
# (because of class mode duality)
output_shape = self.internal_output_shapes[i]
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[
i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
weighted_metric_fn = _weighted_masked_objective(acc_fn)
metric_name = metric_name_prefix + 'acc'
else:
metric_fn = metrics_module.get(metric)
weighted_metric_fn = _weighted_masked_objective(metric_fn)
metric_name = metric_name_prefix + metric_fn.__name__
with K.name_scope(metric_name):
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=masks[i])
append_metric(i, metric_name, metric_result)
handle_metrics(output_metrics)
handle_metrics(output_weighted_metrics, weights=weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
training_updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
updates = self.updates + training_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _check_num_samples(self,
ins,
batch_size=None,
steps=None,
steps_name='steps'):
"""Determine the number of samples provided for training and evaluation.
The number of samples is not defined when running with `steps`,
in which case the number of samples is set to `None`.
Arguments:
ins: List of tensors to be fed to the Keras function.
batch_size: Integer batch size or `None` if not defined.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
steps_name: The public API's parameter name for `steps`.
Raises:
ValueError: when `steps` is `None` and the attribute `ins.shape`
does not exist. Also raises ValueError when `steps` is not `None`
and `batch_size` is not `None` because they are mutually
exclusive.
Returns:
When steps is `None`, returns the number of samples to be
processed based on the size of the first dimension of the
first input numpy array. When steps is not `None` and
`batch_size` is `None`, returns `None`.
"""
if steps is not None:
num_samples = None
if batch_size is not None:
raise ValueError('If ' + steps_name +
' is set, the `batch_size` must be None.')
elif ins and hasattr(ins[0], 'shape'):
num_samples = ins[0].shape[0]
else:
raise ValueError('Either the input data should have '
'a defined shape, or ' + steps_name +
' should be specified.')
return num_samples
def _fit_loop(self,
f,
ins,
out_labels=None,
batch_size=None,
epochs=100,
verbose=1,
callbacks=None,
val_f=None,
val_ins=None,
shuffle=True,
callback_metrics=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Abstract fit function for `f(ins)`.
Assume that f returns a list, labeled by out_labels.
Arguments:
f: Keras function returning a list of tensors
ins: List of tensors to be fed to `f`
out_labels: List of strings, display names of
the outputs of `f`
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: List of tensors to be fed to `val_f`
shuffle: Whether to shuffle the data at the beginning of each epoch
      callback_metrics: List of strings, the display names of the metrics
          passed to the callbacks. They should be the concatenation of the
          list of display names of the outputs of `f` and the list of
          display names of the outputs of `val_f`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
Returns:
`History` object.
Raises:
ValueError: In case of invalid argument values.
"""
do_validation = False
if val_f and val_ins:
do_validation = True
if (verbose and ins and
hasattr(ins[0], 'shape') and hasattr(val_ins[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
if validation_steps:
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` when doing step-wise '
'training, i.e. `steps_per_epoch` must be set.')
do_validation = True
num_train_samples = self._check_num_samples(
ins, batch_size, steps_per_epoch, 'steps_per_epoch')
if num_train_samples is not None:
index_array = np.arange(num_train_samples)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
if steps_per_epoch is not None:
count_mode = 'steps'
else:
count_mode = 'samples'
callbacks += [cbks.ProgbarLogger(count_mode)]
callbacks = cbks.CallbackList(callbacks)
out_labels = out_labels or []
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics or [],
})
callbacks.on_train_begin()
callback_model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_ins
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
if steps_per_epoch is not None:
for step_index in range(steps_per_epoch):
batch_logs = {}
batch_logs['batch'] = step_index
batch_logs['size'] = 1
callbacks.on_batch_begin(step_index, batch_logs)
outs = f(ins)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(step_index, batch_logs)
if callback_model.stop_training:
break
if do_validation:
val_outs = self._test_loop(
val_f,
val_ins,
batch_size=batch_size,
steps=validation_steps,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
else:
if shuffle == 'batch':
index_array = _batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = _make_batches(num_train_samples, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if callback_model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation:
val_outs = self._test_loop(
val_f, val_ins, batch_size=batch_size, verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `_predict_loop` finished.
Ignored with the default value of `None`.
Returns:
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
if steps is not None:
# Step-based predictions.
# Since we do not know how many samples
# we will see, we cannot pre-allocate
# the returned Numpy arrays.
# Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_outs = f(ins)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if step == 0:
for batch_out in batch_outs:
unconcatenated_outs.append([])
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
if verbose == 1:
progbar.update(step)
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
else:
# Sample-based predictions.
outs = []
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
# Pre-allocate the results arrays.
for batch_out in batch_outs:
shape = (num_samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=None, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
outs = []
if steps is not None:
if verbose == 1:
progbar = Progbar(target=steps)
for step in range(steps):
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
            for _ in batch_outs:
              outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step)
for i in range(len(outs)):
outs[i] /= steps
else:
if verbose == 1:
progbar = Progbar(target=num_samples)
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
            for _ in batch_outs:
              outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= num_samples
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self,
x,
y,
sample_weight=None,
class_weight=None,
check_batch_axis=True,
batch_size=None):
if not hasattr(self, 'optimizer'):
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(losses, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = _standardize_input_data(
x,
self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False,
exception_prefix='input')
y = _standardize_input_data(
y,
self._feed_output_names,
output_shapes,
check_batch_axis=False,
exception_prefix='target')
sample_weights = _standardize_sample_weights(sample_weight,
self._feed_output_names)
class_weights = _standardize_class_weights(class_weight,
self._feed_output_names)
sample_weights = [
_standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
self._feed_sample_weight_modes)
]
_check_array_lengths(x, y, sample_weights)
_check_loss_and_target_compatibility(y, self._feed_loss_fns,
self._feed_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def _get_deduped_metrics_names(self):
out_labels = self.metrics_names
# Rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows).
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
return deduped_out_labels
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, it will default to 32.
epochs: Integer, the number of times to iterate
over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: List of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: Float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
validation_data: Data on which to evaluate
the loss and any model metrics
at the end of each epoch. The model will not
be trained on this data.
This could be a tuple (x_val, y_val)
or a tuple (x_val, y_val, val_sample_weights).
shuffle: Boolean, whether to shuffle the training data
before each epoch. Has no effect when `steps_per_epoch`
is not `None`.
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with Input Tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of unique samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
Returns:
A `History` instance. Its `history` attribute contains
all information collected during training.
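    Example:
    A minimal sketch; `model` is assumed to be compiled with a single
    input of shape `(100,)` and a single binary output, and the arrays
    below are hypothetical:

    ```python
    import numpy as np
    x_train = np.random.random((256, 100))
    y_train = np.random.randint(2, size=(256, 1))
    history = model.fit(x_train, y_train,
                        batch_size=32, epochs=5, validation_split=0.2)
    print(history.history['loss'])
    ```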
Raises:
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
if x is None and y is None and steps_per_epoch is None:
raise ValueError('If fitting from data tensors, '
'you should specify the `steps_per_epoch` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare validation data.
do_validation = False
val_ins = []
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing validation_data, '
'it must contain 2 (x_val, y_val) '
'or 3 (x_val, y_val, val_sample_weights) '
'items, however it contains %d items' % len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
check_batch_axis=False,
batch_size=batch_size)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (_slice_arrays(x, 0, split_at), _slice_arrays(x, split_at))
y, val_y = (_slice_arrays(y, 0, split_at), _slice_arrays(y, split_at))
sample_weights, val_sample_weights = (_slice_arrays(
sample_weights, 0, split_at), _slice_arrays(sample_weights, split_at))
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_steps:
do_validation = True
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_ins = [0.]
# Prepare input arrays and training function.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
if do_validation:
self._make_test_function()
val_f = self.test_function
callback_metrics = copy.copy(out_labels) + [
'val_' + n for n in out_labels
]
else:
val_f = None
callback_metrics = copy.copy(out_labels)
# Delegate logic to `_fit_loop`.
return self._fit_loop(
f,
ins,
out_labels=out_labels,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_f=val_f,
val_ins=val_ins,
shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def evaluate(self,
x,
y,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
sample_weight: Array of weights to weight the contribution
of different samples to the loss and metrics.
steps: Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
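    Example:
    A minimal sketch; `x_test`/`y_test` are hypothetical arrays, and the
    model is assumed compiled with at least one metric so that a list is
    returned:

    ```python
    loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
    print(dict(zip(model.metrics_names, loss_and_metrics)))
    ```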
Raises:
ValueError: In case of invalid argument values.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and y is None and steps is None:
raise ValueError('If evaluating from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
check_batch_axis=False,
batch_size=batch_size)
# Prepare inputs, delegate logic to `_test_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(
f, ins, batch_size=batch_size, verbose=verbose, steps=steps)
def predict(self, x, batch_size=None, verbose=0, steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: The input data, as a Numpy array
(or list of Numpy arrays if the model has multiple outputs).
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
Returns:
Numpy array(s) of predictions.
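    Example:
    A minimal sketch; `x_test` is a hypothetical Numpy array matching the
    model's input shape:

    ```python
    predictions = model.predict(x_test, batch_size=128, verbose=1)
    ```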
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and steps is None:
raise ValueError('If predicting from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x = _standardize_input_data(
x,
self._feed_input_names,
self._feed_input_shapes,
check_batch_axis=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# Prepare inputs, delegate logic to `_predict_loop`.
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(
f, ins, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
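    Example:
    A minimal sketch: one gradient update on a single hypothetical batch
    (`x_batch`/`y_batch` are placeholder names):

    ```python
    # One weight update; returns the training loss for this batch.
    loss = model.train_on_batch(x_batch, y_batch)
    ```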
"""
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
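    Example:
    A minimal sketch; `x_val_batch`/`y_val_batch` are hypothetical
    held-out arrays:

    ```python
    # Evaluates one batch without updating any weights.
    val_loss = model.test_on_batch(x_val_batch, y_val_batch)
    ```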
"""
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, check_batch_axis=True)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input samples, as a Numpy array.
Returns:
Numpy array(s) of predictions.
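    Example:
    A minimal sketch: a single forward pass over a hypothetical batch:

    ```python
    predictions = model.predict_on_batch(x_batch)
    ```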
"""
x = _standardize_input_data(x, self._feed_input_names,
self._feed_input_shapes)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0,
**kwargs):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
    The use of `keras.utils.Sequence` guarantees the ordering and the
    single use of every input per epoch when using
    `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
          be equal to the number of unique samples of your dataset
divided by the batch size.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Maximum size for the generator queue
workers: Maximum number of processes to spin up
when using process based threading
use_multiprocessing: If True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
shuffle: Whether to shuffle the data at the beginning of each
epoch. Only used with instances of `Sequence` (
keras.utils.Sequence).
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
**kwargs: support for legacy arguments.
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
wait_time = 0.01 # in seconds
epoch = initial_epoch
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__') or
isinstance(validation_data, Sequence))
if val_gen and not validation_steps:
raise ValueError('When using a generator for validation data, '
'you must specify a value for '
'`validation_steps`.')
# Prepare display labels.
out_labels = self._get_deduped_metrics_names()
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger(count_mode='steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks.set_model(callback_model)
callbacks.set_params({
'epochs': epochs,
'steps': steps_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError('`validation_data` should be a tuple '
'`(val_x, val_y, val_sample_weight)` '
'or `(val_x, val_y)`. Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y, val_sample_weight)
val_data = val_x + val_y + val_sample_weights
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
val_data += [0.]
for cbk in callbacks:
cbk.validation_data = val_data
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
      logging.warning('Using a generator with `use_multiprocessing=True`'
                      ' and multiple workers may duplicate your data.'
                      ' Please consider using the `keras.utils.Sequence`'
                      ' class.')
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing, shuffle=shuffle)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
callback_model.stop_training = False
while epoch < epochs:
callbacks.on_epoch_begin(epoch)
steps_done = 0
batch_index = 0
while steps_done < steps_per_epoch:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if isinstance(x, list):
batch_size = x[0].shape[0]
elif isinstance(x, dict):
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if not isinstance(outs, list):
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# Construct epoch logs.
epoch_logs = {}
batch_index += 1
steps_done += 1
# Epoch finished.
if steps_done >= steps_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(
validation_data,
validation_steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
else:
# No need for try/except because
# data has already been validated.
val_outs = self.evaluate(
val_x,
val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
if callback_model.stop_training:
break
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
finally:
if enqueuer is not None:
enqueuer.stop()
callbacks.on_train_end()
return self.history
def evaluate_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: maximum size for the generator queue
workers: maximum number of processes to spin up
when using process based threading
use_multiprocessing: if True, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
**kwargs: support for legacy arguments.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
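    Example:
    A minimal sketch, assuming `eval_generator` is a hypothetical
    generator yielding `(x, y)` batches indefinitely:

    ```python
    scores = model.evaluate_generator(eval_generator, steps=50)
    ```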
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
if kwargs:
raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))
self._make_test_function()
steps_done = 0
wait_time = 0.01
all_outs = []
batch_sizes = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
      logging.warning('Using a generator with `use_multiprocessing=True`'
                      ' and multiple workers may duplicate your data.'
                      ' Please consider using the `keras.utils.Sequence`'
                      ' class.')
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
while steps_done < steps:
generator_output = next(output_generator)
if not hasattr(generator_output, '__len__'):
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
raise ValueError('Output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
if isinstance(x, list):
batch_size = len(x[0])
elif isinstance(x, dict):
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
if batch_size == 0:
raise ValueError('Received an empty batch. '
'Batches should at least contain one item.')
all_outs.append(outs)
steps_done += 1
batch_sizes.append(batch_size)
finally:
if enqueuer is not None:
enqueuer.stop()
if not isinstance(outs, list):
return np.average(np.asarray(all_outs), weights=batch_sizes)
else:
averages = []
for i in range(len(outs)):
averages.append(
np.average([out[i] for out in all_outs], weights=batch_sizes))
return averages
def predict_generator(self,
generator,
steps,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0,
**kwargs):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
max_queue_size: Maximum size for the generator queue.
workers: Maximum number of processes to spin up
when using process based threading
use_multiprocessing: If `True`, use process based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: verbosity mode, 0 or 1.
**kwargs: support for legacy arguments.
Returns:
Numpy array(s) of predictions.
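    Example:
    A minimal sketch, assuming `pred_generator` is a hypothetical
    generator yielding batches of input samples indefinitely:

    ```python
    predictions = model.predict_generator(pred_generator, steps=50,
                                          verbose=1)
    ```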
Raises:
ValueError: In case the generator yields
data in an invalid format.
"""
# Legacy support
if 'max_q_size' in kwargs:
max_queue_size = kwargs.pop('max_q_size')
logging.warning('The argument `max_q_size` has been renamed '
'`max_queue_size`. Update your method calls accordingly.')
if 'pickle_safe' in kwargs:
use_multiprocessing = kwargs.pop('pickle_safe')
logging.warning('The argument `pickle_safe` has been renamed '
'`use_multiprocessing`. '
'Update your method calls accordingly.')
self._make_predict_function()
steps_done = 0
wait_time = 0.01
all_outs = []
is_sequence = isinstance(generator, Sequence)
if not is_sequence and use_multiprocessing and workers > 1:
      logging.warning('Using a generator with `use_multiprocessing=True`'
                      ' and multiple workers may duplicate your data.'
                      ' Please consider using the `keras.utils.Sequence`'
                      ' class.')
enqueuer = None
try:
if is_sequence:
enqueuer = OrderedEnqueuer(
generator, use_multiprocessing=use_multiprocessing)
else:
enqueuer = GeneratorEnqueuer(
generator,
use_multiprocessing=use_multiprocessing,
wait_time=wait_time)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
output_generator = enqueuer.get()
if verbose == 1:
progbar = Progbar(target=steps)
while steps_done < steps:
generator_output = next(output_generator)
if isinstance(generator_output, tuple):
# Compatibility with the generators
# used for training.
if len(generator_output) == 2:
x, _ = generator_output
elif len(generator_output) == 3:
x, _, _ = generator_output
else:
raise ValueError('Output of generator should be '
'a tuple `(x, y, sample_weight)` '
'or `(x, y)`. Found: ' + str(generator_output))
else:
# Assumes a generator that only
# yields inputs (not targets and sample weights).
x = generator_output
outs = self.predict_on_batch(x)
if not isinstance(outs, list):
outs = [outs]
if not all_outs:
for out in outs:
all_outs.append([])
for i, out in enumerate(outs):
all_outs[i].append(out)
steps_done += 1
if verbose == 1:
progbar.update(steps_done)
finally:
if enqueuer is not None:
enqueuer.stop()
if len(all_outs) == 1:
if steps_done == 1:
return all_outs[0][0]
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
| apache-2.0 |
spottybones/dotfiles | HOME/ipython/.config/ipython/profile_default/ipython_config.py | 1 | 22356 |
# Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = [
'import pandas as pd',
'import numpy as np',
'pd.set_option("display.width", 203)'
]
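## As an illustration only (a hypothetical addition, not part of this
# profile), further startup code could be appended the same way:
#c.InteractiveShellApp.exec_lines.append('import matplotlib.pyplot as plt')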
## A list of dotted module names of IPython extensions to load.
#c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = ''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = 'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
#c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
#c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = 'Python 3.5.2 |Continuum Analytics, Inc.| (default, Jul 2 2016, 17:53:06) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.3.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
## The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
#c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
#c.InteractiveShell.colors = 'Neutral'
##
#c.InteractiveShell.debug = False
## **Deprecated**
#
# Will be removed in IPython 6.0
#
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). `deep_reload`
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
#c.InteractiveShell.deep_reload = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
#c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
##
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
c.TerminalInteractiveShell.editing_mode = 'vi'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
c.TerminalInteractiveShell.editor = 'vim'
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax
# highlighting:
# igor, pastie, fruity, paraiso-dark, manni, arduino, tango, lovelace, vs, borland, murphy, algol_nu, bw, native, monokai, emacs, default, abap, autumn, vim, trac, xcode, paraiso-light, colorful, friendly, perldoc, rrt, algol, rainbow_dash
#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Enable mouse support in the prompt
#c.TerminalInteractiveShell.mouse_support = False
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion, multiline input, and prompt
# colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of lines at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colons at both
# ends but not the backticks), to avoid creating a history file.
#c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
#    :attr:`print_method`. If it does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
#    to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
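#
# For example (an illustrative sketch, run in a live IPython session rather
# than set in this file; `MyType` and `my_repr` are placeholder names):
#
#   plain = get_ipython().display_formatter.formatters['text/plain']
#   plain.for_type(MyType, my_repr)                    # register by type
#   plain.for_type_by_name('numpy', 'dtype', my_repr)  # register lazily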
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Activate greedy completion. PENDING DEPRECATION: this is now mostly taken care
# of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
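#
# For example (hypothetical values): to expose a `%%moon` cell magic backed by
# an interpreter that is not on PATH, one could set
#
# c.ScriptMagics.script_magics = ['moon']
# c.ScriptMagics.script_paths = {'moon': '/opt/secret/bin/moon'}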
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
| mit |
gryffon/eggofpanku | src/settings/xmlsettings.py | 1 | 5777 | import sys
import string
import os
import xml.dom.minidom
from xml.dom.minidom import Node
from xml.dom.minidom import Document
DEFAULT_SETTINGS = {
'gamehost':'localhost',
'gameport':18072,
'last_deck':'',
'maximize':True,
'mainwindow_size':(780,560),
'playername':'Toku-san',
'cardsource':'',
'playfield_snap':True,
'dir_imagepacks':'images/cards/',
'imagepackdir_changed':False,
'playfield_bg_mode':0,
'playfield_bg_color1':(0, 206, 24),
'playfield_bg_color2':(0, 190, 16),
'playfield_bg_image':'',
'playfield_bg_image_display':False,
'attach_ok': ('personality',),
'matchuser':'',
'matchpassword':'1234',
'log_multiplayer_games':False,
'canvas_card_spacing':1,
'use_celestial_holdings':False,
'celestial_card_draw':False,
}
DEFAULT_SETTINGS_DATA_DIR = {
'data_dir': os.path.join(os.path.expanduser('~'), 'eopk'),
}
class _XMLSettings:
def __init__(self, xmlfile, defaults):
self.__dict__['defaults'] = defaults
self.__dict__['_filename'] = xmlfile
self.__dict__.update(defaults)
self.LoadSettingsFile(self._filename)
self.ApplySettingsFile()
def ApplySettingsFile(self):
try:
if self.xml:
pass
except AttributeError:
self.CreateSettingsFile()
for node in self.xml.getElementsByTagName("eopk:setting"):
self.__dict__[node.getAttribute("name")] = eval(node.firstChild.nodeValue)
def __setattr__(self,newsetting,value):
try:
if self.xml:
pass
except AttributeError:
self.CreateSettingsFile()
for node in self.xml.getElementsByTagName("eopk:setting"):
if(node.getAttribute("name") == newsetting):
node.firstChild.nodeValue = repr(value)
return
print newsetting + " not found in settings: " + self.__dict__['_filename']
def CreateSettingsFile(self):
newsettings = Document()
eopksettings = newsettings.createElement("eopk:settings")
eopkSchemaLocation = newsettings.createAttributeNS("http://code.google.com/p/eopk/", "eopk:schemaLocation")
eopkSchemaLocation.nodeValue="http://code.google.com/p/eopk/ http://www.torchdragon.com/l5r/settings.xsd"
eopksettings.setAttributeNode(eopkSchemaLocation)
eopkXMLNS = newsettings.createAttributeNS("http://code.google.com/p/eopk/", "xmlns:eopk")
eopkXMLNS.nodeValue="http://code.google.com/p/eopk/"
eopksettings.setAttributeNode(eopkXMLNS)
newsettings.appendChild(eopksettings)
for k, v in self.__dict__['defaults'].items():
eopkSetting = newsettings.createElement("eopk:setting")
eopkSettingName = newsettings.createAttributeNS("http://code.google.com/p/eopk/", "name")
eopkSettingName.nodeValue = k
eopkSetting.setAttributeNode(eopkSettingName)
eopkSettingValue = newsettings.createTextNode(repr(v))
eopkSetting.appendChild(eopkSettingValue)
eopksettings.appendChild(eopkSetting)
self.__dict__['xml'] = newsettings
def WriteSettingsFile(self):
try:
if self.xml:
pass
except AttributeError:
self.CreateSettingsFile()
#Check for new settings
for k, v in self.__dict__['defaults'].items():
settingfound = False
for node in self.xml.getElementsByTagName("eopk:setting"):
if k == node.getAttribute("name"):
settingfound = True
break
if settingfound == False:
print "Setting not found: " + k
eopkSetting = self.__dict__['xml'].createElement("eopk:setting")
eopkSettingName = self.__dict__['xml'].createAttributeNS("http://code.google.com/p/eopk/", "name")
eopkSettingName.nodeValue = k
eopkSetting.setAttributeNode(eopkSettingName)
eopkSettingValue = self.__dict__['xml'].createTextNode(repr(v))
eopkSetting.appendChild(eopkSettingValue)
self.__dict__['xml'].childNodes[0].appendChild(eopkSetting)
f = file(self._filename, 'w')
self.xml.writexml(f, indent=" ", addindent=" ", newl="\n")
f.close()
self.ApplySettingsFile()
def LoadSettingsFile(self, xmlsettings):
try:
self.__dict__['xml'] = xml.dom.minidom.parse(xmlsettings)
except IOError:
print "Unable to open settings file: " + xmlsettings
return False
self.StripTextNodes(self.xml.getElementsByTagName('eopk:settings'))
def StripTextNodes(self, nodeList):
"""The XML parser will keep appending \n to each text node when it outputs the text
so we need to strip the \n characters in order to stop bloat from occurring"""
for node in nodeList:
if node.nodeType == node.ELEMENT_NODE:
self.StripTextNodes(node.childNodes)
if node.nodeType == node.TEXT_NODE:
node.data = string.strip(string.strip(node.data, '\n'))
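# Example usage (an illustrative sketch; the module-level objects created
# below are the real entry points):
#
#   from settings.xmlsettings import settings
#   print settings.playername         # read a setting
#   settings.playername = 'Hida'      # update the XML node in memory
#   settings.WriteSettingsFile()      # persist changes to settings.xml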
locationsettings = _XMLSettings('location.xml', DEFAULT_SETTINGS_DATA_DIR)
settings = _XMLSettings(os.path.join(os.path.expanduser(locationsettings.data_dir), 'settings.xml'), DEFAULT_SETTINGS) | gpl-2.0 |
OFAI/hub-toolbox-python3 | tests/centering_test.py | 1 | 4214 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of the HUB TOOLBOX available at
https://github.com/OFAI/hub-toolbox-python3/
The HUB TOOLBOX is licensed under the terms of the GNU GPLv3.
(c) 2016-2018, Roman Feldbauer
Austrian Research Institute for Artificial Intelligence (OFAI)
Contact: <roman.feldbauer@ofai.at>
"""
import unittest
import numpy as np
from sklearn.preprocessing import StandardScaler
from hub_toolbox.centering import centering, weighted_centering, \
localized_centering, dis_sim_global, dis_sim_local
from hub_toolbox.io import load_dexter
from hub_toolbox.hubness import hubness
from hub_toolbox.knn_classification import score
class TestCentering(unittest.TestCase):
def setUp(self):
self.distance, self.target, self.vectors = load_dexter()
def test_centering_equal_to_sklearn_centering(self):
vectors_cent = centering(self.vectors, 'vector')
scaler = StandardScaler(with_mean=True, with_std=False)
vectors_sklearn_cent = scaler.fit_transform(self.vectors)
return np.testing.assert_array_almost_equal(
vectors_cent, vectors_sklearn_cent, decimal=7)
def test_weighted_centering_with_gamma_zero_equal_centering(self):
vectors_wcent = weighted_centering(self.vectors, 'cosine', gamma=0.)
vectors_cent = centering(self.vectors, 'vector')
return np.testing.assert_array_almost_equal(
vectors_cent, vectors_wcent, decimal=7)
def test_weighted_centering_with_gamma_notzero_changes_result(self):
gamma = np.random.rand(1)
vectors_wcent = weighted_centering(self.vectors, 'cosine', gamma)
vectors_cent = centering(self.vectors, 'vector')
return self.assertNotEqual((vectors_cent - vectors_wcent).sum(), 0)
def test_localized_centering(self):
"""Test whether hubness and k-NN accuracy improve for dexter"""
h_orig = hubness(self.distance)[0]
acc_orig = score(self.distance, self.target)[0][0, 0]
sim_lcent = localized_centering(self.vectors, kappa=20, gamma=1.)
h_lcent = hubness(sim_lcent, metric='similarity')[0]
acc_lcent = score(sim_lcent, self.target, metric='similarity')[0][0, 0]
result = (h_orig / h_lcent > 1.5) & (acc_lcent - acc_orig > 0.03)
return self.assertTrue(result)
def test_localized_centering_parallel(self):
lcent_seq = localized_centering(
self.vectors, kappa=20, gamma=1., n_jobs=4)
lcent_par = localized_centering(
self.vectors, kappa=20, gamma=1., n_jobs=1)
return np.testing.assert_array_almost_equal(lcent_par, lcent_seq, 14)
def test_dis_sim_global(self):
"""Test whether hubness and k-NN accuracy improve for dexter"""
h_orig = hubness(self.distance)[0]
acc_orig = score(self.distance, self.target)[0][0, 0]
dist_dsg = dis_sim_global(self.vectors)
h_dsg = hubness(dist_dsg)[0]
acc_dsg = score(dist_dsg, self.target)[0][0, 0]
result = (h_orig / h_dsg > 2) & (acc_dsg - acc_orig > 0.07)
return self.assertTrue(result)
def test_dis_sim_local(self):
"""Test whether hubness and k-NN accuracy improve for dexter"""
#self.vectors = np.tile(self.vectors, 1)
h_orig = hubness(self.distance)[0]
acc_orig = score(self.distance, self.target)[0][0, 0]
dist_dsl = dis_sim_local(self.vectors, k=50)
h_dsl = hubness(dist_dsl)[0]
acc_dsl = score(dist_dsl, self.target)[0][0, 0]
result = (h_orig / h_dsl > 10) & (acc_dsl - acc_orig > 0.03)
return self.assertTrue(result)
def test_dis_sim_local_parallel(self):
dsl_seq = dis_sim_local(self.vectors, k=50, n_jobs=1)
dsl_par = dis_sim_local(self.vectors, k=50, n_jobs=4)
return np.testing.assert_array_almost_equal(dsl_seq, dsl_par, 14)
def test_dis_sim_local_split_parallel_(self):
X = self.vectors[:150, :]
Y = self.vectors[150:, :]
dsl_seq = dis_sim_local(X, Y, n_jobs=1)
dsl_par = dis_sim_local(X, Y, n_jobs=4)
return np.testing.assert_array_almost_equal(dsl_seq, dsl_par, 14)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
herow/planning_qgis | tests/src/python/utilities.py | 6 | 11647 | """Helper utilities for QGIS python unit tests.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton (tim@linfiniti.com)'
__date__ = '20/01/2011'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
import sys
import platform
import tempfile
from PyQt4.QtCore import QSize, QDir
from PyQt4.QtGui import QWidget
from qgis.core import (
QgsApplication,
QgsCoordinateReferenceSystem,
QgsVectorFileWriter,
QgsMapLayerRegistry,
QgsMapSettings,
QgsMapRendererParallelJob,
QgsMapRendererSequentialJob,
QgsFontUtils
)
from qgis.gui import QgsMapCanvas
from qgis_interface import QgisInterface
import hashlib
import re
from itertools import izip
import webbrowser
import subprocess
# Support python < 2.7 via unittest2 needed for expected failure decorator.
# Note that you should ignore unused import warnings here as these are imported
# from this module by other tests.
if sys.version_info[0:2] < (2, 7):
try:
from unittest2 import TestCase, expectedFailure
import unittest2 as unittest
except ImportError:
print "You should install unittest2 to run the salt tests"
sys.exit(0)
else:
from unittest import TestCase, expectedFailure
import unittest
QGISAPP = None # Static variable used to hold hand to running QGis app
CANVAS = None
PARENT = None
IFACE = None
GEOCRS = 4326 # constant for EPSG:GEOCRS Geographic CRS id
FONTSLOADED = False
def assertHashesForFile(theHashes, theFilename):
"""Assert that a files has matches one of a list of expected hashes"""
myHash = hashForFile(theFilename)
myMessage = ('Unexpected hash'
'\nGot: %s'
'\nExpected: %s'
'\nPlease check graphics %s visually '
'and add to list of expected hashes '
'if it is OK on this platform.'
% (myHash, theHashes, theFilename))
assert myHash in theHashes, myMessage
def assertHashForFile(theHash, theFilename):
"""Assert that a files has matches its expected hash"""
myHash = hashForFile(theFilename)
myMessage = ('Unexpected hash'
'\nGot: %s'
'\nExpected: %s' % (myHash, theHash))
assert myHash == theHash, myMessage
def hashForFile(theFilename):
"""Return an md5 checksum for a file"""
myPath = theFilename
myData = file(myPath).read()
myHash = hashlib.md5()
myHash.update(myData)
myHash = myHash.hexdigest()
return myHash
def getQgisTestApp():
""" Start one QGis application to test agaist
Input
NIL
Output
handle to qgis app
If QGis is already running the handle to that app will be returned
"""
global QGISAPP # pylint: disable=W0603
if QGISAPP is None:
myGuiFlag = True # All test will run qgis in gui mode
# Note: QGIS_PREFIX_PATH is evaluated in QgsApplication -
# no need to mess with it here.
QGISAPP = QgsApplication(sys.argv, myGuiFlag)
QGISAPP.initQgis()
s = QGISAPP.showSettings()
print s
global PARENT # pylint: disable=W0603
if PARENT is None:
PARENT = QWidget()
global CANVAS # pylint: disable=W0603
if CANVAS is None:
CANVAS = QgsMapCanvas(PARENT)
CANVAS.resize(QSize(400, 400))
global IFACE # pylint: disable=W0603
if IFACE is None:
# QgisInterface is a stub implementation of the QGIS plugin interface
IFACE = QgisInterface(CANVAS)
return QGISAPP, CANVAS, IFACE, PARENT
def unitTestDataPath(theSubdir=None):
"""Return the absolute path to the InaSAFE unit test data dir.
.. note:: This is not the same thing as the SVN inasafe_data dir. Rather
this is a new dataset where the test datasets are all tiny for fast
testing and the datasets live in the same repo as the code.
Args:
* theSubdir: (Optional) Additional subdir to add to the path - typically
'hazard' or 'exposure'.
"""
myPath = __file__
tmpPath = os.path.split(os.path.dirname(myPath))
myPath = os.path.split(tmpPath[0])
if theSubdir is not None:
myPath = os.path.abspath(os.path.join(myPath[0],
'testdata',
theSubdir))
else:
myPath = os.path.abspath(os.path.join(myPath[0], 'testdata'))
return myPath
def svgSymbolsPath():
return os.path.abspath(
os.path.join(unitTestDataPath(), '..', '..', 'images', 'svg'))
def setCanvasCrs(theEpsgId, theOtfpFlag=False):
"""Helper to set the crs for the CANVAS before a test is run.
Args:
* theEpsgId - Valid EPSG identifier (int)
* theOtfpFlag - whether on the fly projections should be enabled
on the CANVAS. Default to False.
"""
# Enable on-the-fly reprojection
CANVAS.mapRenderer().setProjectionsEnabled(theOtfpFlag)
# Create CRS Instance
myCrs = QgsCoordinateReferenceSystem()
myCrs.createFromId(theEpsgId, QgsCoordinateReferenceSystem.EpsgCrsId)
# Reproject all layers to WGS84 geographic CRS
CANVAS.mapRenderer().setDestinationCrs(myCrs)
def writeShape(theMemoryLayer, theFileName):
myFileName = os.path.join(str(QDir.tempPath()), theFileName)
print myFileName
# Explicitly giving all options, not really needed but nice for clarity
myErrorMessage = ''
myOptions = []
myLayerOptions = []
mySelectedOnlyFlag = False
mySkipAttributesFlag = False
myGeoCrs = QgsCoordinateReferenceSystem()
myGeoCrs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
myResult = QgsVectorFileWriter.writeAsVectorFormat(
theMemoryLayer,
myFileName,
'utf-8',
myGeoCrs,
'ESRI Shapefile',
mySelectedOnlyFlag,
myErrorMessage,
myOptions,
myLayerOptions,
mySkipAttributesFlag)
assert myResult == QgsVectorFileWriter.NoError
def compareWkt(a, b, tol=0.000001):
r0 = re.compile( "-?\d+(?:\.\d+)?(?:[eE]\d+)?" )
r1 = re.compile( "\s*,\s*" )
# compare the structure
a0 = r1.sub( ",", r0.sub( "#", a ) )
b0 = r1.sub( ",", r0.sub( "#", b ) )
if a0 != b0:
return False
# compare the numbers with given tolerance
a0 = r0.findall( a )
b0 = r0.findall( b )
if len(a0) != len(b0):
return False
for (a1,b1) in izip(a0,b0):
if abs(float(a1)-float(b1))>tol:
return False
return True
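# Usage sketch (hypothetical values): the structure must match exactly and
# the numbers only within tolerance, e.g.
#   compareWkt("POINT (1.000001 2)", "POINT (1 2)", tol=1e-5)  # -> True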
def getTempfilePath(sufx='png'):
"""
:returns: Path to empty tempfile ending in defined suffix
Caller should delete tempfile if not used
"""
tmp = tempfile.NamedTemporaryFile(
suffix=".{0}".format(sufx), delete=False)
filepath = tmp.name
tmp.close()
return filepath
def renderMapToImage(mapsettings, parallel=False):
"""
Render current map to an image, via multi-threaded renderer
:param QgsMapSettings mapsettings:
:param bool parallel: Do parallel or sequential render job
:rtype: QImage
"""
if parallel:
job = QgsMapRendererParallelJob(mapsettings)
else:
job = QgsMapRendererSequentialJob(mapsettings)
job.start()
job.waitForFinished()
return job.renderedImage()
def mapSettingsString(ms):
"""
:param QgsMapSettings mapsettings:
:rtype: str
"""
# fullExtent() causes extra call in middle of output flow; get first
full_ext = ms.visibleExtent().toString()
s = 'MapSettings...\n'
s += ' layers(): {0}\n'.format(
[unicode(QgsMapLayerRegistry.instance().mapLayer(i).name())
for i in ms.layers()])
s += ' backgroundColor(): rgba {0},{1},{2},{3}\n'.format(
ms.backgroundColor().red(), ms.backgroundColor().green(),
ms.backgroundColor().blue(), ms.backgroundColor().alpha())
s += ' selectionColor(): rgba {0},{1},{2},{3}\n'.format(
ms.selectionColor().red(), ms.selectionColor().green(),
ms.selectionColor().blue(), ms.selectionColor().alpha())
s += ' outputSize(): {0} x {1}\n'.format(
ms.outputSize().width(), ms.outputSize().height())
s += ' outputDpi(): {0}\n'.format(ms.outputDpi())
s += ' mapUnits(): {0}\n'.format(ms.mapUnits())
s += ' scale(): {0}\n'.format(ms.scale())
s += ' mapUnitsPerPixel(): {0}\n'.format(ms.mapUnitsPerPixel())
s += ' extent():\n {0}\n'.format(
ms.extent().toString().replace(' : ', '\n '))
s += ' visibleExtent():\n {0}\n'.format(
ms.visibleExtent().toString().replace(' : ', '\n '))
s += ' fullExtent():\n {0}\n'.format(full_ext.replace(' : ', '\n '))
s += ' hasCrsTransformEnabled(): {0}\n'.format(
ms.hasCrsTransformEnabled())
s += ' destinationCrs(): {0}\n'.format(
ms.destinationCrs().authid())
s += ' flag.Antialiasing: {0}\n'.format(
ms.testFlag(QgsMapSettings.Antialiasing))
s += ' flag.UseAdvancedEffects: {0}\n'.format(
ms.testFlag(QgsMapSettings.UseAdvancedEffects))
s += ' flag.ForceVectorOutput: {0}\n'.format(
ms.testFlag(QgsMapSettings.ForceVectorOutput))
s += ' flag.DrawLabeling: {0}\n'.format(
ms.testFlag(QgsMapSettings.DrawLabeling))
s += ' flag.DrawEditingInfo: {0}\n'.format(
ms.testFlag(QgsMapSettings.DrawEditingInfo))
s += ' outputImageFormat(): {0}\n'.format(ms.outputImageFormat())
return s
def getExecutablePath(exe):
"""
:param exe: Name of executable, e.g. lighttpd
:returns: Path to executable
"""
exe_exts = []
if (platform.system().lower().startswith('win') and
"PATHEXT" in os.environ):
exe_exts = os.environ["PATHEXT"].split(os.pathsep)
for path in os.environ["PATH"].split(os.pathsep):
exe_path = os.path.join(path, exe)
if os.path.exists(exe_path):
return exe_path
for ext in exe_exts:
if os.path.exists(exe_path + ext):
return exe_path + ext  # include the matched Windows extension in the path
return ''
def getTestFontFamily():
return QgsFontUtils.standardTestFontFamily()
def getTestFont(style='Roman', size=12):
"""Only Roman and Bold are loaded by default
Others available: Oblique, Bold Oblique
"""
if not FONTSLOADED:
loadTestFonts()
return QgsFontUtils.getStandardTestFont(style, size)
def loadTestFonts():
if QGISAPP is None:
getQgisTestApp()
global FONTSLOADED # pylint: disable=W0603
if FONTSLOADED is False:
QgsFontUtils.loadStandardTestFonts(['Roman', 'Bold'])
msg = getTestFontFamily() + ' base test font styles could not be loaded'
res = (QgsFontUtils.fontFamilyHasStyle(getTestFontFamily(), 'Roman')
and QgsFontUtils.fontFamilyHasStyle(getTestFontFamily(), 'Bold'))
assert res, msg
FONTSLOADED = True
def openInBrowserTab(url):
if sys.platform[:3] in ('win', 'dar'):
webbrowser.open_new_tab(url)
else:
# some Linux OS pause execution on webbrowser open, so background it
cmd = 'import webbrowser;' \
'webbrowser.open_new_tab("{0}")'.format(url)
subprocess.Popen([sys.executable, "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
| gpl-2.0 |
vinayak-mehta/scikit-learn | sklearn/feature_selection/tests/test_base.py | 17 | 3594 | import numpy as np
import pytest
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection._base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, accept_sparse="csc")
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[:: self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list("ABCDEFGHIJ")
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ""
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert np.int32 == sel.transform(X.astype(np.int32)).dtype
assert np.float32 == sel.transform(X.astype(np.float32)).dtype
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.transform(np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert np.int32 == sel.transform(sparse(X).astype(np.int32)).dtype
assert np.float32 == sel.transform(sparse(X).astype(np.float32)).dtype
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.transform(np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert np.int32 == sel.inverse_transform(Xt.astype(np.int32)).dtype
assert np.float32 == sel.inverse_transform(Xt.astype(np.float32)).dtype
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.inverse_transform(np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert np.int32 == sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype
assert np.float32 == sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype
# Check wrong shape raises error
with pytest.raises(ValueError):
sel.inverse_transform(np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
ramon-oliveira/aorun | tests/test_utils.py | 1 | 1275 | import pytest
from .context import aorun
import numpy as np
from torch import Tensor
from torch.autograd import Variable
from aorun import utils
def test_to_numpy():
a = Tensor(10)
a = utils.to_numpy(a)
assert type(a) is np.ndarray
a = Variable(Tensor(10))
a = utils.to_numpy(a)
assert type(a) is np.ndarray
a = np.array([10])
a = utils.to_numpy(a)
assert type(a) is np.ndarray
with pytest.raises(ValueError) as e:
a = 'hahaha'
utils.to_numpy(a)
def test_to_tensor():
a = Tensor(10)
a = utils.to_tensor(a)
assert type(a) is Tensor
a = Variable(Tensor(10))
a = utils.to_tensor(a)
assert type(a) is Variable
a = np.array([10.0], dtype='float32')
a = utils.to_tensor(a)
assert type(a) is Tensor
with pytest.raises(ValueError) as e:
a = 'hahaha'
utils.to_tensor(a)
def test_to_variable():
a = Tensor(10)
a = utils.to_variable(a)
assert type(a) is Variable
a = Variable(Tensor(10))
a = utils.to_variable(a)
assert type(a) is Variable
a = np.array([10.0], dtype='float32')
a = utils.to_variable(a)
assert type(a) is Variable
with pytest.raises(ValueError) as e:
a = 'hahaha'
utils.to_variable(a)
| mit |
procoder317/scikit-learn | examples/neighbors/plot_species_kde.py | 280 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
 also known as the Forest Small Rice Rat, a rodent that lives in
 Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
tsai-kailin/kernel_proxies | KPV/utils.py | 1 | 17070 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 15:48:32 2020
@author: afsaneh
"""
import os,sys
import time
import numpy as np
import pandas as pd
import functools
from typing import Callable
import jax.scipy.linalg as jsla
import jax.numpy.linalg as jnla
import operator
import torch
import matplotlib.pyplot as plt
from matplotlib import cm
from typing import Dict, Any, Iterator, Tuple
from functools import partial
import random
import scipy as sp
import scipy.sparse as sps
import scipy.linalg as la
from numpy.linalg import matrix_rank
import statistics
import itertools as it
import math
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
import sklearn.metrics.pairwise
from sklearn.kernel_approximation import (RBFSampler,Nystroem)
from sklearn.preprocessing import StandardScaler
import numba
import jax
import jax.numpy as jnp
import jax.scipy as jsp
from jax import grad, jit, vmap
from jax import random
@jax.jit
def modist(v):
return jnp.median(v)
@jax.jit
def sum_jit(A,B):
return jnp.sum(A,B)
@jax.jit
def linear_kern(x, y):
return jnp.sum(x * y)
@jax.jit
def l2_dist(x,y):
return jnp.array((x - y)**2)
#@functools.partial(jax.jit, static_argnums=(0,1))
def identifier(x, y):
# jnp.where keeps this traceable: a plain Python `if` on traced values
# would fail when identifier_ker below is run under jax.jit/vmap.
return jnp.where(x == y, 1, 0)
@functools.partial(jax.jit, static_argnums=(0))
def dist_func(func1: Callable, x,y):
return jax.vmap(lambda x1: jax.vmap(lambda y1: func1( x1, y1))(y))(x)
@jax.jit
def rbf_ker(x,y,scale=1):
dist_mat=dist_func(l2_dist,x,y)
gamma=modist(jnp.sqrt(dist_mat))
#gamma=1
#coef=1/(2*gamma**2)
coef=1/(2*scale*(gamma**2))
return jnp.exp(-coef*dist_mat)
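# Usage sketch (hypothetical values): rbf_ker returns the full Gram matrix,
# with the bandwidth chosen by the median heuristic on pairwise distances:
#   K = rbf_ker(jnp.arange(3.0), jnp.arange(3.0))  # shape (3, 3)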
@jax.jit
def identifier_ker(x,y):
return dist_func(identifier,x,y)
#% function h
#@jax.jit
def cal_h(params_h,a,w):
h,s2_A,s1_W, m1,m2=params_h
k_AA_r =rbf_ker (s2_A, jnp.asarray(a).reshape(1,))
k_WW_r =rbf_ker (s1_W, jnp.asarray(w).reshape(1,))
b=h.reshape(m1,m2)
return jnp.dot(jnp.dot(mat_trans(k_WW_r),b), k_AA_r) [0][0]
def cal_h_vec(params_h,a,W):
h,s2_A,s1_W, m1,m2=params_h
k_AA_r =rbf_ker (jnp.array(s2_A), jnp.asarray(a).reshape(1,)).squeeze()
k_WW_r =rbf_ker (jnp.array(s1_W), jnp.array(W)).squeeze()
b=h.reshape(m1,m2)
return jnp.dot(jnp.dot(mat_trans(k_WW_r),b), k_AA_r)
def cal_h_vecl(params_h,a,W):
h,s2_A,s1_W, m1,m2=params_h
m1_train=m1
m2_train=m2
lst_a_ker=[]
for i in s2_A.columns:
if np.issubdtype(s2_A[i],np.integer):
kern_ma =identifier_k (jnp.array(s2_A[i]) , jnp.asarray(a).reshape(1,)).reshape(m2_train,1)
else:
arr=jnp.array(s2_A[i])
kern_ma =rbf_ker (jnp.array(s2_A[i]) , jnp.asarray(a).reshape(1,)).reshape(m2_train,1)
lst_a_ker.append(kern_ma)
lst_w_ker=[]
for i in s1_W.columns:
if np.issubdtype(s1_W[i],np.integer):
kern_m = identifier_k (jnp.array(s1_W[i]) , jnp.array( W[i]))
else:
kern_m =rbf_ker (jnp.array(s1_W[i]) , jnp.array( W[i]))
lst_w_ker.append(kern_m)
def had_ker(lst_k):
hk=jnp.ones(lst_k[0].shape)
for i in range(len(lst_k)):
hk=Hadamard_prod(hk,lst_k[i])
return hk
k_AA_r=had_ker(lst_a_ker)
k_WW_r=had_ker(lst_w_ker)
b=h.reshape(m1,m2)
return mat_mul(mat_mul(mat_trans(k_WW_r),b), k_AA_r)
def cal_h_veclx(params_h,do_A,sampl_w,lst_a,sampl_x, int_lst=[]):
h,s2_AX,s1_W, m1,m2,k_ww_1=params_h
m1_train=m1
m2_train=m2
lst_a_ker=[]
lst_x_ker=[]
lst_w_ker=[]
for i in s2_AX.columns:
if i in lst_a:
if np.issubdtype(s2_AX[i],np.integer):
arr=jnp.asarray(s2_AX[i])
kern_ma =identifier_k (arr, do_A)#.reshape(m2_train,1)
elif i in int_lst:
arr=jnp.asarray(s2_AX[i])
kern_ma =identifier_k (arr, do_A)#.reshape(m2_train,1)
else:
arr=jnp.asarray(s2_AX[i])
kern_ma =rbf_ker (arr, do_A)#.reshape(m2_train,1)
lst_a_ker.append(kern_ma)
else:
if np.issubdtype(s2_AX[i],np.integer):
#arr=jnp.asarray(s2_AX[i])
kern_mx =identifier_k (jnp.asarray(s2_AX[i]) , jnp.asarray(sampl_x[i]))
elif i in int_lst:
kern_mx =identifier_k (jnp.asarray(s2_AX[i]) , jnp.asarray(sampl_x[i]))
else:
#arr=jnp.asarray(s2_AX[i])
kern_mx =rbf_ker (jnp.asarray(s2_AX[i]) , jnp.asarray(sampl_x[i]))
lst_x_ker.append(kern_mx)
for i in s1_W.columns:
if np.issubdtype(s1_W[i],np.integer):
#arr1=jnp.asarray(s1_W[i])
kern_mw = identifier_k (jnp.array(s1_W[i]), jnp.array(sampl_w[i]))
elif i in int_lst:
#arr1=jnp.asarray(s1_W[i])
kern_mw = identifier_k (jnp.array(s1_W[i]), jnp.array(sampl_w[i]))
else:
#arr1=jnp.asarray(s1_W[i])
kern_mw = rbf_ker (jnp.array(s1_W[i]), jnp.array(sampl_w[i]))
lst_w_ker.append(kern_mw)
def had_ker(lst_k):
if lst_k==[]:
hk=jnp.ones(m2).reshape(m2,1)
else:
hk=jnp.ones(lst_k[0].shape)
for i in range(len(lst_k)):
hk=Hadamard_prod(hk,lst_k[i])
return hk
k_XX_r=had_ker(lst_x_ker).mean(axis=1)
k_AA_r=had_ker(lst_a_ker).squeeze()
k_AX_r=jnp.array([k_AA_r[:,i]* k_XX_r for i in range(k_AA_r.shape[1])])
k_WW_r=had_ker(lst_w_ker).mean(axis=1)
b=h.reshape(m1,m2)
return mat_mul(k_AX_r,mat_mul(b,k_WW_r))
@jax.jit
def Hadamard_prod(A,B):
return A*B
@jax.jit
def jsla_inv(A):
return jsla.inv(A)
@jax.jit
def jnla_norm(A):
return jnla.norm(A)
@jax.jit
def kron_prod(a,b):
return jnp.kron(a,b)
@jax.jit
def modif_kron(x,y):
if (y.shape[1]!=x.shape[1]):
print("Column_number error")
else:
return jnp.array(list(jnp.kron(x[:,i], y[:,i]).T for i in list(range(y.shape[1]))))
@jax.jit
def mat_trans(A):
return jnp.transpose(A)
def regularisation_term(A,B,reg_coef,m2,m1,lamd=0.000001):
term1=reg_coef * m2 * jnp.kron(A,B)
#dim=m1*m2
#I=jnp.identity(dim)
#term2=(jnp.array(lamd).reshape(1,) * jnp.identity(dim))
return term1
@jax.jit
def cal_loocv(K, reg, y, lam):
nD = K.shape[0]
I = jnp.eye(nD)
H = I - K.dot(jsla.inv(K + lam * nD * reg))
tildeH_inv = jnp.diag(1.0 / jnp.diag(H))
return jnp.linalg.norm(tildeH_inv.dot(H.dot(y)))
def cal_l_y (K, reg, y, low=0.00001, high=10, n=500):
lam_values = np.linspace(low, high, num=n)
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv(K, reg, y, lam)
return min(grid_search.items(), key=operator.itemgetter(1))[0]
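# Usage sketch: pick the kernel-ridge regularisation parameter by
# leave-one-out CV over a linear grid (`reg` is the regulariser matrix that
# cal_loocv expects, e.g. an identity or kernel matrix):
#   lam_opt = cal_l_y(K, reg, y)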
@jax.jit
def cal_loocv_emb(K, kernel_y, lam):
nD = K.shape[0]
I = jnp.eye(nD)
Q = jsla.inv(K + lam * nD * I)
H = I - K.dot(Q)
tildeH_inv = jnp.diag(1.0 / jnp.diag(H))
return jnp.trace(tildeH_inv @ H @ kernel_y @ H @ tildeH_inv)
def cal_l_w (K, kernel_y, low=0.0001, high=1, n=10, abs_low=.001):
git=1e-05
lam_values = np.logspace(np.log10(low), np.log10(high), n)
tolerance=lam_values [1]-lam_values [0]
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv_emb(K, kernel_y, lam)
l,loo=min(grid_search.items(), key=operator.itemgetter(1))
'''while (abs(l-low)<tolerance and low> abs_low) :
low=low *.1
high=high *.1 + git
lam_values = np.linspace(low, high, n)
tolerance=lam_values [1]-lam_values [0]
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv_emb(K, kernel_y, lam)
l,loo=min(grid_search.items(), key=operator.itemgetter(1))
while abs(l-high)<tolerance:
low= low *10
high=high *10 +git
lam_values = jnp.linspace(low, high, n)
tolerance=lam_values [1]-lam_values [0]
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv_emb(K, kernel_y, lam)
l,loo=min(grid_search.items(), key=operator.itemgetter(1))'''
return l,loo
@jax.jit
def cal_loocv_alpha(K, sigma, gamma, y, lam):
nD = K.shape[0]
I = jnp.eye(nD)
H = I - mat_mul(mat_mul(K,gamma), (jsla.inv(sigma + lam * nD* I)))
tildeH_inv = jnp.diag(1.0 / jnp.diag(H))
return jnp.linalg.norm(tildeH_inv.dot(H.dot(y)))
def cal_l_yw (K, sigma, gamma, y, low=0.01, high=1, n=10, abs_low=.001):
git=1e-05
lam_values = np.logspace(np.log10(low), np.log10(high), num=n)
tolerance=lam_values [1]-lam_values [0]
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv_alpha(K,sigma, gamma, y, lam)
l,loo=min(grid_search.items(), key=operator.itemgetter(1))
'''
while (abs(l-low)<tolerance and low> abs_low):
low=low *.1
high=high *.1+git
lam_values = np.linspace(low, high, num=n)
tolerance=lam_values [1]-lam_values [0]
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv_alpha(K,sigma, gamma, y, lam)
l,loo=min(grid_search.items(), key=operator.itemgetter(1))
while abs(l-high)<tolerance:
low= low *10
high=high *10 +git
lam_values = np.linspace(low, high, num=n)
tolerance=lam_values [1]-lam_values [0]
grid_search={}
for lam in lam_values:
grid_search[lam]=cal_loocv_alpha(K,sigma, gamma, y, lam)
l,loo=min(grid_search.items(), key=operator.itemgetter(1))
'''
return l,loo
#test=pd.DataFrame(grid_search.items())
#plt.scatter(test[0],test[1])
#%Data to store for multiple uses to avoid repeating calculation
#k_ZZ_2_act
#k_WW_2_act
#k_W1W2_act
def Kernels(samp1,samp2):
k_AA_1 =rbf_ker (samp1[:,0], samp1[:,0])
k_AA_2 =rbf_ker (samp2[:,0], samp2[:,0])
k_A1A2 =rbf_ker (samp1[:,0], samp2[:,0])
k_ZZ_1 =rbf_ker (samp1[:,2], samp1[:,2])
#k_ZZ_2 =rbf_ker (samp2[:,2], samp2[:,2])
k_Z1Z2 =rbf_ker (samp1[:,2], samp2[:,2])
k_WW_1 =rbf_ker (samp1[:,3], samp1[:,3])
#k_WW_2 =rbf_ker (samp2[:,3], samp2[:,3])
#k_W1W2 =rbf_ker (samp1[:,3], samp2[:,3])
return k_AA_1, k_AA_2 , k_A1A2,k_ZZ_1 , k_Z1Z2, k_WW_1
def Kernels_n(samp1,samp2):
k_AA_1 =rbf_ker (samp1[:,0], samp1[:,0])
k_AA_2 =rbf_ker (samp2[:,0], samp2[:,0])
k_A1A2 =rbf_ker (samp1[:,0], samp2[:,0])
k_ZZ_1 =rbf_ker (samp1[:,2], samp1[:,2])
k_ZZ_2 =rbf_ker (samp2[:,2], samp2[:,2])
k_Z1Z2 =rbf_ker (samp1[:,2], samp2[:,2])
k_WW_1 =rbf_ker (samp1[:,3], samp1[:,3])
k_WW_2 =rbf_ker (samp2[:,3], samp2[:,3])
k_W1W2 =rbf_ker (samp1[:,3], samp2[:,3])
return k_AA_1, k_AA_2 , k_A1A2,k_ZZ_1 ,k_ZZ_2 , k_Z1Z2, k_WW_1, k_WW_2, k_W1W2
def is_pos_def(x):
return (np.linalg.eigvals(x), np.all(np.linalg.eigvals(x) > 0))
def cal_mse(y,ey,n):
return 1/n*np.square(y-ey)
def sample_split(key, data,n_val,n_trn, n_total):
val=jnp.split(random.permutation(key,data),
(n_val,n_val+n_trn,n_total),axis=0)[0]
train=jnp.split(random.permutation(key,data),
(n_val,n_val+n_trn,n_total),axis=0)[1]
test=jnp.split(random.permutation(key,data),
(n_val,n_val+n_trn,n_total),axis=0)[2]
return val,train,test
def sampling(A, key, n):
return jnp.split(random.permutation(key,A),(n,A.shape[0]),axis=0)[0]
@jax.jit
def mat_mul(A,B):
return jnp.matmul(A,B)
@jax.jit
def jsla_solve(A,B):
return jsp.linalg.solve(A, B, assume_a='pos')
def ace_point(key,A2,n, mu,vu,
mw, vw, my, vy, params_h,a_AY,a_WY):
causal_effect=pd.DataFrame()
A_cause=np.arange(A2.min(),A2.max(),(A2.max()-A2.min())/20)
for i in range(len(A_cause)):
counter=i*3
A=jnp.repeat(A_cause[i],n)
U=gen_U(mu,n,key[counter],vu)
W=gen_W( U , mw , n, vw, key[counter+1])
H=cal_h_vec(params_h,A_cause[i],W).reshape(n,)
Y=gen_Y(causal_Y,A_cause[i], a_AY, U, W, a_WY,my, n,vy ,key[counter+2])
y_ind_a=causal_Y(A_cause[i], a_AY)
Y_c_A=jnp.repeat(y_ind_a,n)
causal_effect=causal_effect.append(pd.DataFrame([A,W,H,Y,Y_c_A]).T, ignore_index=True)
causal_effect.columns=['A','W','H','Y','Y_c_A']
return causal_effect
def h_surf(params_h, A2, W2):
list_aw=list(it.product(jnp.arange(A2.min(),A2.max(),1),
jnp.arange(W2.min(),W2.max(),1)))
#list_aw_df=pd.DataFrame(list(list_aw))
h_surface=list((i[0],i[1],cal_h(params_h,i[0],i[1])) for i in list_aw)
#h_surface=pd.concat([pd.DataFrame(list_aw),h_O_aw],axis=1)
#h_surface.columns=['A','W','H']
return h_surface
def normaliser (K):
return (K-K.mean())/(jnp.sqrt(K.var()))
def ichol(K, err = 1):
n = K.shape[0]
d = np.array(np.diag(K))
R = np.zeros((n,n))
I = -1 * np.ones(n, dtype=int)
a = np.max(d)
j = 0
I[j] = np.argmax(d)
nu = []
while(a > err and j < n):
a = np.max(d)
I[j] = np.argmax(d)
nu.append(np.sqrt(a))
for i in range(n):
R[j,i] = (K[I[j], i] - R[:,i].dot(R[:, I[j]]))/np.sqrt(a)
d[i] -= R[j,i]*R[j,i]
j = j+1
R = R[:j,:]
return R,I
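# Usage sketch: for a PSD kernel matrix K,
#   R, I = ichol(K, err=1e-3)
# returns a pivoted low-rank factor with K approximately R.T @ R; I holds the
# pivot order, chosen greedily by the largest remaining diagonal entry.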
@jax.jit
def jsla_inv_svd(A):
ur, s, vh =jsla.svd(A, full_matrices=False, overwrite_a=True)
return jnp.dot(vh.transpose(),jnp.dot(jnp.diag(s**-1),ur.transpose()))
def h_surf_data(params_h, A2,W2):
list_aw=list(it.product(A2,W2))
#list_aw_df=pd.DataFrame(list(list_aw))
h_surface=list((i[0],i[1],cal_h(params_h,i[0],i[1])) for i in list_aw)
#h_surface=pd.concat([pd.DataFrame(list_aw),h_O_aw],axis=1)
#h_surface.columns=['A','W','H']
return h_surface
def ace_point_data(key,A2,n, mu,vu,
mw, vw, my, vy, params_h,a_AY,a_WY):
causal_effect=pd.DataFrame()
A_cause=np.arange(A2.min(),A2.max(),(A2.max()-A2.min())/20)
for i in range(len(A_cause)):
counter=i*3
A=jnp.repeat(A_cause[i],n)
U=gen_U(mu,n,key[counter],vu)
W=gen_W( U , mw , n, vw, key[counter+1])
H=cal_h_vec(params_h,A_cause[i],W).reshape(n,)
Y=gen_Y(A_cause[i], a_AY, U, W, a_WY,my, n,vy ,key[counter+2])
#y_ind_a=causal_Y(A_cause[i], a_AY)
#Y_c_A=jnp.repeat(y_ind_a,n)
causal_effect=causal_effect.append(pd.DataFrame([A,W,H,Y]).T, ignore_index=True)
causal_effect.columns=['A','W','H','Y']
return causal_effect
def cal_mse(cal_h_vecAW: Callable, params_h, A2, W2):
# NOTE: this redefinition shadows the elementwise cal_mse above; the original
# body computed the W-averaged h estimate but returned nothing, so that
# average is returned here.
estimated_h=cal_h_vecAW(params_h, A2, W2)
estimated_ha=jnp.average(estimated_h, axis=0)
return estimated_ha
def identifier_k(A,B):
l=list(it.product(A,B))
a=[]
for i in l:
a.append(identifier(i[0],i[1]))
return np.array(a).reshape(A.shape[0],B.shape[0])
def standardise(X):
scaler = StandardScaler()
if X.ndim == 1:
X_scaled = scaler.fit_transform(X.reshape(-1,1)).squeeze()
return X_scaled, scaler
else:
X_scaled = scaler.fit_transform(X).squeeze()
return X_scaled, scaler
def stage2_weights(Gamma_w, Sigma_inv):
n_row = Gamma_w.shape[0]
arr = [mat_mul(jnp.diag(Gamma_w[i, :]), Sigma_inv) for i in range(n_row)]
return jnp.concatenate(arr, axis=0)
def standardise_arr(arr):
return (arr-arr.mean(axis=0))/arr.std(axis=0)
| mit |
vinayak-mehta/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 7 | 12122 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
from scipy import ndimage
from scipy.sparse.csgraph import connected_components
import pytest
from sklearn.utils.fixes import sp_version, parse_version
from sklearn.feature_extraction.image import (
img_to_graph,
grid_to_graph,
extract_patches_2d,
reconstruct_from_patches_2d,
PatchExtractor,
_extract_patches,
)
@pytest.fixture(scope="module")
def raccoon_face():
if sp_version.release >= parse_version("1.10").release:
pytest.importorskip("pooch")
from scipy.datasets import face
else:
from scipy.misc import face
return face(gray=True)
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert grad_x.nnz == grad_y.nnz
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(
grad_x.data[grad_x.data > 0], grad_y.data[grad_y.data > 0]
)
def test_img_to_graph_sparse():
# Check that the edges are in the right position
# when using a sparse image with a singleton component
mask = np.zeros((2, 3), dtype=bool)
mask[0, 0] = 1
mask[:, 2] = 1
x = np.zeros((2, 3))
x[0, 0] = 1
x[0, 2] = -1
x[1, 2] = -2
grad_x = img_to_graph(x, mask=mask).todense()
desired = np.array([[1, 0, 0], [0, -1, 1], [0, 1, -2]])
np.testing.assert_array_equal(grad_x, desired)
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size**2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert connected_components(A)[0] == 2
# check ordering
mask = np.zeros((2, 3), dtype=bool)
mask[0, 0] = 1
mask[:, 2] = 1
graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense()
desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
np.testing.assert_array_equal(graph, desired)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert connected_components(A)[0] == 1
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
assert A.dtype == bool
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
assert A.dtype == int
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
assert A.dtype == np.float64
def test_connect_regions(raccoon_face):
face = raccoon_face.copy()
# subsample by 4 to reduce run time
face = face[::4, ::4]
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask=mask)
assert ndimage.label(mask)[1] == connected_components(graph)[0]
def test_connect_regions_with_grid(raccoon_face):
face = raccoon_face.copy()
# subsample by 4 to reduce run time
face = face[::4, ::4]
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert ndimage.label(mask)[1] == connected_components(graph)[0]
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert ndimage.label(mask)[1] == connected_components(graph)[0]
def _downsampled_face():
if sp_version.release >= parse_version("1.10").release:
pytest.importorskip("pooch")
from scipy.datasets import face as raccoon_face
else:
from scipy.misc import face as raccoon_face
face = raccoon_face(gray=True)
face = face.astype(np.float32)
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert patches.shape == (expected_n_patches, p_h, p_w, 3)
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert patches.shape == (100, p_h, p_w)
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert patches.shape == (expected_n_patches, p_h, p_w)
with pytest.raises(ValueError):
extract_patches_2d(face, (p_h, p_w), max_patches=2.0)
with pytest.raises(ValueError):
extract_patches_2d(face, (p_h, p_w), max_patches=-1.0)
def test_extract_patch_same_size_image():
face = downsampled_face
# Request patches of the same size as image
# Should return just the single patch a.k.a. the image
patches = extract_patches_2d(face, face.shape, max_patches=2)
assert patches.shape[0] == 1
def test_extract_patches_less_than_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 3 * i_h // 4, 3 * i_w // 4
# this is 3185
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w), max_patches=4000)
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert extr == extr.fit(faces)
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(
patch_size=(p_h, p_w), max_patches=max_patches, random_state=0
)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w)
max_patches = 0.5
expected_n_patches = len(faces) * int(
(i_h - p_h + 1) * (i_w - p_w + 1) * max_patches
)
extr = PatchExtractor(
patch_size=(p_h, p_w), max_patches=max_patches, random_state=0
)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert patches.shape == (len(faces) * 100, 19, 25)
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w)
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert patches.shape == (expected_n_patches, p_h, p_w, 3)
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for image_shape, patch_size, patch_step, expected_view, last_patch in zip(
image_shapes, patch_sizes, patch_steps, expected_views, last_patches
):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = _extract_patches(
image, patch_shape=patch_size, extraction_step=patch_step
)
ndim = len(image_shape)
assert patches.shape[:ndim] == expected_view
last_patch_slices = tuple(
slice(i, i + j, None) for i, j in zip(last_patch, patch_size)
)
assert (
patches[(-1, None, None) * ndim] == image[last_patch_slices].squeeze()
).all()
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = _extract_patches(face, patch_shape=p)
assert patches.shape == (expected_n_patches[0], expected_n_patches[1], p, p)
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
with pytest.raises(ValueError):
extract_patches_2d(x, (4, 1))
with pytest.raises(ValueError):
extract_patches_2d(x, (1, 4))
| bsd-3-clause |
traveller59/spconv | test/test_multi_impl.py | 1 | 13343 | # Copyright 2021 Yan Yan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compare results between different algos:
CPU: simple gather-mm-scatter
Native: Fused gather-mm-scatter
ImplicitGemm: implicit gemm
"""
import time
from pathlib import Path
import numpy as np
import torch
from torch import nn
from cumm import tensorview as tv
from spconv.core import ConvAlgo
import spconv.pytorch as spconv
import pickle
from spconv.test_utils import generate_sparse_data, params_grid
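# Illustrative sketch (added; not part of the original test): the "simple
# gather-mm-scatter" reference path compared below can be written, for a
# single layer, roughly as follows. `indice_pairs` is a hypothetical list of
# (input_indices, output_indices) index tensors, one per kernel offset;
# spconv computes these pairs internally from the sparse coordinates.
def _gather_mm_scatter_sketch(features, weights, indice_pairs, num_out):
    # features: [N_in, C_in]; weights: [K, C_in, C_out]
    out = torch.zeros(num_out, weights.shape[-1],
                      dtype=features.dtype, device=features.device)
    for k, (in_idx, out_idx) in enumerate(indice_pairs):
        # gather input rows, apply this offset's weight, scatter-add to output
        out.index_add_(0, out_idx, features[in_idx] @ weights[k])
    return out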
class Net(nn.Module):
def __init__(self, shape, algo):
super().__init__()
pool_algo = algo
# pool_algo = ConvAlgo.Native
self.net = spconv.SparseSequential(
spconv.SubMConv3d(3, 32, 3, bias=False, indice_key="c0",
algo=algo),
spconv.SubMConv3d(32,
32,
3,
bias=False,
indice_key="c0",
algo=algo),
# # nn.BatchNorm1d(32),
# # nn.ReLU(),
spconv.SubMConv3d(32, 64, 3, bias=False, indice_key="c0",
algo=algo),
spconv.SubMConv3d(64,
64,
3,
bias=False,
indice_key="c0",
algo=algo),
# nn.BatchNorm1d(32),
# # nn.ReLU(),
spconv.SparseConv3d(64, 64, 3, 2, 1, bias=False, indice_key="m0", algo=algo),
# # spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
spconv.SubMConv3d(64,
96,
3,
bias=False,
indice_key="c1",
algo=algo),
spconv.SubMConv3d(96,
96,
3,
bias=False,
indice_key="c1",
algo=algo),
# nn.BatchNorm1d(64),
# nn.ReLU(),
spconv.SparseConv3d(96, 96, 2, 2, bias=False, indice_key="m1", algo=algo),
# spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
spconv.SubMConv3d(96,
128,
3,
bias=False,
indice_key="c2",
algo=algo),
spconv.SubMConv3d(128,
128,
3,
bias=False,
indice_key="c2",
algo=algo),
# nn.BatchNorm1d(128),
# nn.ReLU(),
# spconv.SparseConv3d(128, 128, 2, 2, bias=False, indice_key="m2"),
spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
spconv.SubMConv3d(128,
160,
3,
bias=False,
indice_key="c3",
algo=algo),
spconv.SubMConv3d(160,
160,
3,
bias=False,
indice_key="c3",
algo=algo),
# nn.BatchNorm1d(128),
# nn.ReLU(),
# spconv.SparseConv3d(160, 160, 2, 2, bias=False, indice_key="m3"),
spconv.SparseMaxPool3d(2, 2, algo=pool_algo, indice_key="m3"),
spconv.SubMConv3d(160,
192,
3,
bias=False,
indice_key="c4",
algo=algo),
spconv.SubMConv3d(192,
192,
3,
bias=False,
indice_key="c4",
algo=algo),
# nn.BatchNorm1d(128),
# nn.ReLU(),
spconv.SparseMaxPool3d(2, 2, indice_key="m4", algo=pool_algo),
# spconv.SparseConv3d(192, 192, 2, 2, bias=False, indice_key="m4"),
spconv.SubMConv3d(192,
224,
3,
bias=False,
indice_key="c5",
algo=algo),
spconv.SubMConv3d(224,
224,
3,
bias=False,
indice_key="c5",
algo=algo),
# nn.BatchNorm1d(256),
# nn.ReLU(),
spconv.SparseInverseConv3d(224, 128, 2, indice_key="m4", bias=False, algo=algo),
# # nn.BatchNorm1d(128),
# nn.ReLU(),
spconv.SparseInverseConv3d(128, 64, 2, indice_key="m3", bias=False, algo=algo),
)
max_batch_size = 1
# grid (dense map) is used for indice generation. use pre-allocated grid can run faster.
# self.grid = None
self.shape = shape
def forward(self, features, coors, batch_size):
x = spconv.SparseConvTensor(features,
coors,
self.shape,
batch_size)
return self.net(x)
class NetLight(nn.Module):
def __init__(self, shape, algo):
super().__init__()
pool_algo = algo
# pool_algo = ConvAlgo.Native
self.net = spconv.SparseSequential(
spconv.SubMConv3d(3, 32, 3, bias=False, indice_key="c0",
algo=algo),
spconv.SubMConv3d(32,
32,
3,
bias=False,
indice_key="c0",
algo=algo),
# # nn.BatchNorm1d(32),
# # nn.ReLU(),
spconv.SubMConv3d(32, 64, 3, bias=False, indice_key="c0",
algo=algo),
spconv.SubMConv3d(64,
64,
3,
bias=False,
indice_key="c0",
algo=algo),
# nn.BatchNorm1d(32),
# # nn.ReLU(),
spconv.SparseConv3d(64, 64, 3, 2, 1, bias=False, indice_key="m0", algo=algo),
# # spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
spconv.SubMConv3d(64,
96,
3,
bias=False,
indice_key="c1",
algo=algo),
spconv.SubMConv3d(96,
96,
3,
bias=False,
indice_key="c1",
algo=algo),
# nn.BatchNorm1d(64),
# nn.ReLU(),
spconv.SparseConv3d(96, 96, 2, 2, bias=False, indice_key="m1", algo=algo),
# spconv.SparseMaxPool3d(2, 2, algo=pool_algo),
spconv.SparseInverseConv3d(96, 64, 2, indice_key="m1", bias=False, algo=algo),
# # nn.BatchNorm1d(128),
# nn.ReLU(),
spconv.SparseInverseConv3d(64, 32, 3, indice_key="m0", bias=False, algo=algo),
)
max_batch_size = 1
# grid (dense map) is used for indice generation. use pre-allocated grid can run faster.
# self.grid = None
self.shape = shape
def forward(self, features, coors, batch_size):
x = spconv.SparseConvTensor(features,
coors,
self.shape,
batch_size)
return self.net(x)
def _test_multi_impl(dtype: torch.dtype):
    # TODO: PyTorch 1.12 doesn't support CPU half-precision matmul
    # TODO: remove or revisit this once the TF32 op is ready
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
np.random.seed(50051)
if dtype != torch.float16:
with open(Path(__file__).parent / "data" / "test_spconv.pkl", "rb") as f:
(voxels, coors, spatial_shape) = pickle.load(f)
else:
        # CPU fp16 is very slow, so we use a small dataset here.
spatial_shape = [19, 18, 17]
sparse_dict = generate_sparse_data(spatial_shape, [1500] * 1, 3)
voxels = np.ascontiguousarray(sparse_dict["features"]).astype(
np.float32)
coors = np.ascontiguousarray(
sparse_dict["indices"][:, [3, 0, 1, 2]]).astype(np.int32)
device = torch.device("cuda:0")
device_cpu = torch.device("cpu:0")
voxels_th = torch.from_numpy(voxels).to(device_cpu).to(dtype)
coors_th = torch.from_numpy(coors).to(device_cpu).int()
voxels_th_cuda = torch.from_numpy(voxels).to(device).to(dtype)
coors_th_cuda = torch.from_numpy(coors).to(device).int()
net_cls = Net
if dtype == torch.float16:
# CPU fp16 is very slow, so we use a small network here.
net_cls = NetLight
# cpu
torch.manual_seed(50051)
net_native_cpu = net_cls(spatial_shape, ConvAlgo.Native).to(device_cpu).to(dtype)
# gpu_native
torch.manual_seed(50051)
net_native_gpu = net_cls(spatial_shape, ConvAlgo.Native).to(device).to(dtype)
torch.manual_seed(50051)
net_imp_gpu = net_cls(spatial_shape, ConvAlgo.MaskImplicitGemm).to(device).to(dtype)
torch.manual_seed(50051)
net_simp_gpu = net_cls(spatial_shape, ConvAlgo.MaskSplitImplicitGemm).to(device).to(dtype)
spconv.assign_name_for_sparse_modules(net_native_cpu)
spconv.assign_name_for_sparse_modules(net_native_gpu)
spconv.assign_name_for_sparse_modules(net_imp_gpu)
spconv.assign_name_for_sparse_modules(net_simp_gpu)
with torch.no_grad():
out: torch.Tensor = net_native_cpu(voxels_th, coors_th, 1).dense()
dout = np.random.uniform(-0.2, 0.2, out.shape).astype(np.float32)
dout_t = torch.from_numpy(dout).to(device_cpu).to(dtype)
dout_t_cu = torch.from_numpy(dout).to(device).to(dtype)
t = time.time()
print(1, time.time() - t)
out_cpu = net_native_cpu(voxels_th, coors_th, 1).dense()
if dtype != torch.float16:
out_cpu.backward(dout_t)
out = net_native_gpu(voxels_th_cuda, coors_th_cuda, 1).dense()
print(2, time.time() - t)
out.backward(dout_t_cu)
out_imp = net_imp_gpu(voxels_th_cuda, coors_th_cuda, 1).dense()
print(3, time.time() - t)
out_imp.backward(dout_t_cu)
out_simp = net_simp_gpu(voxels_th_cuda, coors_th_cuda, 1).dense()
print(4, time.time() - t)
out_simp.backward(dout_t_cu)
with torch.no_grad():
dense_cpu = out_cpu.cuda()
dense_native = out
dense_imp = out_imp
dense_simp = out_simp
error_native = torch.linalg.norm(dense_cpu - dense_native).cpu().item()
error_imp = torch.linalg.norm(dense_cpu - dense_imp).cpu().item()
error_simp = torch.linalg.norm(dense_cpu - dense_simp).cpu().item()
print(5, time.time() - t)
print("error_native", error_native)
print("error_imp", error_imp)
print("error_simp", error_simp)
if dtype == torch.float32:
assert error_native < 0.01
assert error_imp < 0.01
assert error_simp < 0.01
else:
assert error_native < 10
assert error_imp < 10
assert error_simp < 10
cpu_params = dict(net_native_cpu.named_parameters())
native_params = dict(net_native_gpu.named_parameters())
imp_params = dict(net_imp_gpu.named_parameters())
simp_params = dict(net_simp_gpu.named_parameters())
for k, cpu_w in cpu_params.items():
native_w = native_params[k]
imp_w = imp_params[k]
simp_w = simp_params[k]
native_w_grad = native_w.grad.detach()
imp_w_grad = imp_w.grad.detach()
simp_w_grad = simp_w.grad.detach()
if dtype != torch.float16:
cpu_w_grad = cpu_w.grad.detach().cuda()
error_native = torch.linalg.norm(native_w_grad - cpu_w_grad).cpu().item()
error_imp = torch.linalg.norm(native_w_grad - imp_w_grad).cpu().item()
error_simp = torch.linalg.norm(native_w_grad - simp_w_grad).cpu().item()
print(k, error_imp, error_simp)
assert error_imp < 1
assert error_simp < 1
def test_multi_impl():
_test_multi_impl(torch.float32)
_test_multi_impl(torch.float16)
if __name__ == "__main__":
test_multi_impl()
| apache-2.0 |
vinayak-mehta/scikit-learn | sklearn/utils/tests/test_param_validation.py | 9 | 20820 | from numbers import Integral, Real
import numpy as np
from scipy.sparse import csr_matrix
import pytest
from sklearn.base import BaseEstimator
from sklearn.model_selection import LeaveOneOut
from sklearn.utils import deprecated
from sklearn.utils._param_validation import Hidden
from sklearn.utils._param_validation import Interval
from sklearn.utils._param_validation import Options
from sklearn.utils._param_validation import StrOptions
from sklearn.utils._param_validation import _ArrayLikes
from sklearn.utils._param_validation import _Booleans
from sklearn.utils._param_validation import _Callables
from sklearn.utils._param_validation import _CVObjects
from sklearn.utils._param_validation import _InstancesOf
from sklearn.utils._param_validation import _MissingValues
from sklearn.utils._param_validation import _PandasNAConstraint
from sklearn.utils._param_validation import _IterablesNotString
from sklearn.utils._param_validation import _NoneConstraint
from sklearn.utils._param_validation import _RandomStates
from sklearn.utils._param_validation import _SparseMatrices
from sklearn.utils._param_validation import _VerboseHelper
from sklearn.utils._param_validation import HasMethods
from sklearn.utils._param_validation import make_constraint
from sklearn.utils._param_validation import generate_invalid_param_val
from sklearn.utils._param_validation import generate_valid_param
from sklearn.utils._param_validation import validate_params
# Some helpers for the tests
@validate_params({"a": [Real], "b": [Real], "c": [Real], "d": [Real]})
def _func(a, b=0, *args, c, d=0, **kwargs):
"""A function to test the validation of functions."""
class _Class:
"""A class to test the _InstancesOf constraint and the validation of methods."""
@validate_params({"a": [Real]})
def _method(self, a):
"""A validated method"""
@deprecated()
@validate_params({"a": [Real]})
def _deprecated_method(self, a):
"""A deprecated validated method"""
class _Estimator(BaseEstimator):
"""An estimator to test the validation of estimator parameters."""
_parameter_constraints: dict = {"a": [Real]}
def __init__(self, a):
self.a = a
def fit(self, X=None, y=None):
self._validate_params()
@pytest.mark.parametrize("interval_type", [Integral, Real])
def test_interval_range(interval_type):
"""Check the range of values depending on closed."""
interval = Interval(interval_type, -2, 2, closed="left")
assert -2 in interval and 2 not in interval
interval = Interval(interval_type, -2, 2, closed="right")
assert -2 not in interval and 2 in interval
interval = Interval(interval_type, -2, 2, closed="both")
assert -2 in interval and 2 in interval
interval = Interval(interval_type, -2, 2, closed="neither")
assert -2 not in interval and 2 not in interval
def test_interval_inf_in_bounds():
"""Check that inf is included iff a bound is closed and set to None.
Only valid for real intervals.
"""
interval = Interval(Real, 0, None, closed="right")
assert np.inf in interval
interval = Interval(Real, None, 0, closed="left")
assert -np.inf in interval
interval = Interval(Real, None, None, closed="neither")
assert np.inf not in interval
assert -np.inf not in interval
@pytest.mark.parametrize(
"interval",
[Interval(Real, 0, 1, closed="left"), Interval(Real, None, None, closed="both")],
)
def test_nan_not_in_interval(interval):
"""Check that np.nan is not in any interval."""
assert np.nan not in interval
@pytest.mark.parametrize(
"params, error, match",
[
(
{"type": Integral, "left": 1.0, "right": 2, "closed": "both"},
TypeError,
r"Expecting left to be an int for an interval over the integers",
),
(
{"type": Integral, "left": 1, "right": 2.0, "closed": "neither"},
TypeError,
"Expecting right to be an int for an interval over the integers",
),
(
{"type": Integral, "left": None, "right": 0, "closed": "left"},
ValueError,
r"left can't be None when closed == left",
),
(
{"type": Integral, "left": 0, "right": None, "closed": "right"},
ValueError,
r"right can't be None when closed == right",
),
(
{"type": Integral, "left": 1, "right": -1, "closed": "both"},
ValueError,
r"right can't be less than left",
),
],
)
def test_interval_errors(params, error, match):
"""Check that informative errors are raised for invalid combination of parameters"""
with pytest.raises(error, match=match):
Interval(**params)
def test_stroptions():
"""Sanity check for the StrOptions constraint"""
options = StrOptions({"a", "b", "c"}, deprecated={"c"})
assert options.is_satisfied_by("a")
assert options.is_satisfied_by("c")
assert not options.is_satisfied_by("d")
assert "'c' (deprecated)" in str(options)
def test_options():
"""Sanity check for the Options constraint"""
options = Options(Real, {-0.5, 0.5, np.inf}, deprecated={-0.5})
assert options.is_satisfied_by(-0.5)
assert options.is_satisfied_by(np.inf)
assert not options.is_satisfied_by(1.23)
assert "-0.5 (deprecated)" in str(options)
@pytest.mark.parametrize(
"type, expected_type_name",
[
(int, "int"),
(Integral, "int"),
(Real, "float"),
(np.ndarray, "numpy.ndarray"),
],
)
def test_instances_of_type_human_readable(type, expected_type_name):
"""Check the string representation of the _InstancesOf constraint."""
constraint = _InstancesOf(type)
assert str(constraint) == f"an instance of '{expected_type_name}'"
def test_hasmethods():
"""Check the HasMethods constraint."""
constraint = HasMethods(["a", "b"])
class _Good:
def a(self):
pass # pragma: no cover
def b(self):
pass # pragma: no cover
class _Bad:
def a(self):
pass # pragma: no cover
assert constraint.is_satisfied_by(_Good())
assert not constraint.is_satisfied_by(_Bad())
assert str(constraint) == "an object implementing 'a' and 'b'"
@pytest.mark.parametrize(
"constraint",
[
Interval(Real, None, 0, closed="left"),
Interval(Real, 0, None, closed="left"),
Interval(Real, None, None, closed="neither"),
StrOptions({"a", "b", "c"}),
_MissingValues(),
_VerboseHelper(),
HasMethods("fit"),
_IterablesNotString(),
_CVObjects(),
],
)
def test_generate_invalid_param_val(constraint):
"""Check that the value generated does not satisfy the constraint"""
bad_value = generate_invalid_param_val(constraint)
assert not constraint.is_satisfied_by(bad_value)
@pytest.mark.parametrize(
"integer_interval, real_interval",
[
(
Interval(Integral, None, 3, closed="right"),
Interval(Real, -5, 5, closed="both"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(Real, -5, 5, closed="neither"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(Real, 4, 5, closed="both"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(Real, 5, None, closed="left"),
),
(
Interval(Integral, None, 3, closed="right"),
Interval(Real, 4, None, closed="neither"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(Real, -5, 5, closed="both"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(Real, -5, 5, closed="neither"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(Real, 1, 2, closed="both"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(Real, None, -5, closed="left"),
),
(
Interval(Integral, 3, None, closed="left"),
Interval(Real, None, -4, closed="neither"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(Real, None, 1, closed="right"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(Real, 1, None, closed="left"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(Real, -10, -4, closed="neither"),
),
(
Interval(Integral, -5, 5, closed="both"),
Interval(Real, -10, -4, closed="right"),
),
(
Interval(Integral, -5, 5, closed="neither"),
Interval(Real, 6, 10, closed="neither"),
),
(
Interval(Integral, -5, 5, closed="neither"),
Interval(Real, 6, 10, closed="left"),
),
(
Interval(Integral, 2, None, closed="left"),
Interval(Real, 0, 1, closed="both"),
),
(
Interval(Integral, 1, None, closed="left"),
Interval(Real, 0, 1, closed="both"),
),
],
)
def test_generate_invalid_param_val_2_intervals(integer_interval, real_interval):
"""Check that the value generated for an interval constraint does not satisfy any of
the interval constraints.
"""
bad_value = generate_invalid_param_val(
real_interval, constraints=[real_interval, integer_interval]
)
assert not real_interval.is_satisfied_by(bad_value)
assert not integer_interval.is_satisfied_by(bad_value)
bad_value = generate_invalid_param_val(
integer_interval, constraints=[real_interval, integer_interval]
)
assert not real_interval.is_satisfied_by(bad_value)
assert not integer_interval.is_satisfied_by(bad_value)
@pytest.mark.parametrize(
"constraints",
[
[_ArrayLikes()],
[_InstancesOf(list)],
[_Callables()],
[_NoneConstraint()],
[_RandomStates()],
[_SparseMatrices()],
[_Booleans()],
[Interval(Real, None, None, closed="both")],
[
Interval(Integral, 0, None, closed="left"),
Interval(Real, None, 0, closed="neither"),
],
],
)
def test_generate_invalid_param_val_all_valid(constraints):
"""Check that the function raises NotImplementedError when there's no invalid value
for the constraint.
"""
with pytest.raises(NotImplementedError):
generate_invalid_param_val(constraints[0], constraints=constraints)
@pytest.mark.parametrize(
"constraint",
[
_ArrayLikes(),
_Callables(),
_InstancesOf(list),
_NoneConstraint(),
_RandomStates(),
_SparseMatrices(),
_Booleans(),
_VerboseHelper(),
_MissingValues(),
StrOptions({"a", "b", "c"}),
Options(Integral, {1, 2, 3}),
Interval(Integral, None, None, closed="neither"),
Interval(Integral, 0, 10, closed="neither"),
Interval(Integral, 0, None, closed="neither"),
Interval(Integral, None, 0, closed="neither"),
Interval(Real, 0, 1, closed="neither"),
Interval(Real, 0, None, closed="both"),
Interval(Real, None, 0, closed="right"),
HasMethods("fit"),
_IterablesNotString(),
_CVObjects(),
],
)
def test_generate_valid_param(constraint):
"""Check that the value generated does satisfy the constraint."""
value = generate_valid_param(constraint)
assert constraint.is_satisfied_by(value)
@pytest.mark.parametrize(
"constraint_declaration, value",
[
(Interval(Real, 0, 1, closed="both"), 0.42),
(Interval(Integral, 0, None, closed="neither"), 42),
(StrOptions({"a", "b", "c"}), "b"),
(Options(type, {np.float32, np.float64}), np.float64),
(callable, lambda x: x + 1),
(None, None),
("array-like", [[1, 2], [3, 4]]),
("array-like", np.array([[1, 2], [3, 4]])),
("sparse matrix", csr_matrix([[1, 2], [3, 4]])),
("random_state", 0),
("random_state", np.random.RandomState(0)),
("random_state", None),
(_Class, _Class()),
(int, 1),
(Real, 0.5),
("boolean", False),
("verbose", 1),
("missing_values", -1),
("missing_values", -1.0),
("missing_values", None),
("missing_values", float("nan")),
("missing_values", np.nan),
("missing_values", "missing"),
(HasMethods("fit"), _Estimator(a=0)),
("cv_object", 5),
],
)
def test_is_satisfied_by(constraint_declaration, value):
"""Sanity check for the is_satisfied_by method"""
constraint = make_constraint(constraint_declaration)
assert constraint.is_satisfied_by(value)
@pytest.mark.parametrize(
"constraint_declaration, expected_constraint_class",
[
(Interval(Real, 0, 1, closed="both"), Interval),
(StrOptions({"option1", "option2"}), StrOptions),
(Options(Real, {0.42, 1.23}), Options),
("array-like", _ArrayLikes),
("sparse matrix", _SparseMatrices),
("random_state", _RandomStates),
(None, _NoneConstraint),
(callable, _Callables),
(int, _InstancesOf),
("boolean", _Booleans),
("verbose", _VerboseHelper),
("missing_values", _MissingValues),
(HasMethods("fit"), HasMethods),
("cv_object", _CVObjects),
],
)
def test_make_constraint(constraint_declaration, expected_constraint_class):
"""Check that make_constraint dispaches to the appropriate constraint class"""
constraint = make_constraint(constraint_declaration)
assert constraint.__class__ is expected_constraint_class
def test_make_constraint_unknown():
"""Check that an informative error is raised when an unknown constraint is passed"""
with pytest.raises(ValueError, match="Unknown constraint"):
make_constraint("not a valid constraint")
def test_validate_params():
"""Check that validate_params works no matter how the arguments are passed"""
with pytest.raises(ValueError, match="The 'a' parameter of _func must be"):
_func("wrong", c=1)
with pytest.raises(ValueError, match="The 'b' parameter of _func must be"):
_func(*[1, "wrong"], c=1)
with pytest.raises(ValueError, match="The 'c' parameter of _func must be"):
_func(1, **{"c": "wrong"})
with pytest.raises(ValueError, match="The 'd' parameter of _func must be"):
_func(1, c=1, d="wrong")
# check in the presence of extra positional and keyword args
with pytest.raises(ValueError, match="The 'b' parameter of _func must be"):
_func(0, *["wrong", 2, 3], c=4, **{"e": 5})
with pytest.raises(ValueError, match="The 'c' parameter of _func must be"):
_func(0, *[1, 2, 3], c="four", **{"e": 5})
def test_validate_params_match_error():
"""Check that an informative error is raised when there are constraints
    that have no matching function parameters
"""
@validate_params({"a": [int], "c": [int]})
def func(a, b):
pass
match = r"The parameter constraints .* contain unexpected parameters {'c'}"
with pytest.raises(ValueError, match=match):
func(1, 2)
def test_validate_params_missing_params():
"""Check that no error is raised when there are parameters without
constraints
"""
@validate_params({"a": [int]})
def func(a, b):
pass
func(1, 2)
def test_decorate_validated_function():
"""Check that validate_params functions can be decorated"""
decorated_function = deprecated()(_func)
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
decorated_function(1, 2, c=3)
    # outer decorator does not interfere with validation
with pytest.warns(FutureWarning, match="Function _func is deprecated"):
with pytest.raises(ValueError, match=r"The 'c' parameter of _func must be"):
decorated_function(1, 2, c="wrong")
def test_validate_params_method():
"""Check that validate_params works with methods"""
with pytest.raises(ValueError, match="The 'a' parameter of _Class._method must be"):
_Class()._method("wrong")
# validated method can be decorated
with pytest.warns(FutureWarning, match="Function _deprecated_method is deprecated"):
with pytest.raises(
ValueError, match="The 'a' parameter of _Class._deprecated_method must be"
):
_Class()._deprecated_method("wrong")
def test_validate_params_estimator():
"""Check that validate_params works with Estimator instances"""
# no validation in init
est = _Estimator("wrong")
with pytest.raises(ValueError, match="The 'a' parameter of _Estimator must be"):
est.fit()
def test_stroptions_deprecated_subset():
"""Check that the deprecated parameter must be a subset of options."""
with pytest.raises(ValueError, match="deprecated options must be a subset"):
StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
def test_hidden_constraint():
"""Check that internal constraints are not exposed in the error message."""
@validate_params({"param": [Hidden(list), dict]})
def f(param):
pass
# list and dict are valid params
f({"a": 1, "b": 2, "c": 3})
f([1, 2, 3])
with pytest.raises(ValueError, match="The 'param' parameter") as exc_info:
f(param="bad")
# the list option is not exposed in the error message
err_msg = str(exc_info.value)
assert "an instance of 'dict'" in err_msg
assert "an instance of 'list'" not in err_msg
def test_hidden_stroptions():
"""Check that we can have 2 StrOptions constraints, one being hidden."""
@validate_params({"param": [StrOptions({"auto"}), Hidden(StrOptions({"warn"}))]})
def f(param):
pass
# "auto" and "warn" are valid params
f("auto")
f("warn")
with pytest.raises(ValueError, match="The 'param' parameter") as exc_info:
f(param="bad")
# the "warn" option is not exposed in the error message
err_msg = str(exc_info.value)
assert "auto" in err_msg
assert "warn" not in err_msg
def test_validate_params_set_param_constraints_attribute():
"""Check that the validate_params decorator properly sets the parameter constraints
    as an attribute of the decorated function/method.
"""
assert hasattr(_func, "_skl_parameter_constraints")
assert hasattr(_Class()._method, "_skl_parameter_constraints")
def test_boolean_constraint_deprecated_int():
"""Check that validate_params raise a deprecation message but still passes
validation when using an int for a parameter accepting a boolean.
"""
@validate_params({"param": ["boolean"]})
def f(param):
pass
# True/False and np.bool_(True/False) are valid params
f(True)
f(np.bool_(False))
# an int is also valid but deprecated
with pytest.warns(
FutureWarning, match="Passing an int for a boolean parameter is deprecated"
):
f(1)
def test_no_validation():
"""Check that validation can be skipped for a parameter."""
@validate_params({"param1": [int, None], "param2": "no_validation"})
def f(param1=None, param2=None):
pass
# param1 is validated
with pytest.raises(ValueError, match="The 'param1' parameter"):
f(param1="wrong")
# param2 is not validated: any type is valid.
class SomeType:
pass
f(param2=SomeType)
f(param2=SomeType())
def test_pandas_na_constraint_with_pd_na():
"""Add a specific test for checking support for `pandas.NA`."""
pd = pytest.importorskip("pandas")
na_constraint = _PandasNAConstraint()
assert na_constraint.is_satisfied_by(pd.NA)
assert not na_constraint.is_satisfied_by(np.array([1, 2, 3]))
def test_iterable_not_string():
"""Check that a string does not satisfy the _IterableNotString constraint."""
constraint = _IterablesNotString()
assert constraint.is_satisfied_by([1, 2, 3])
assert constraint.is_satisfied_by(range(10))
assert not constraint.is_satisfied_by("some string")
def test_cv_objects():
"""Check that the _CVObjects constraint accepts all current ways
to pass cv objects."""
constraint = _CVObjects()
assert constraint.is_satisfied_by(5)
assert constraint.is_satisfied_by(LeaveOneOut())
assert constraint.is_satisfied_by([([1, 2], [3, 4]), ([3, 4], [1, 2])])
assert constraint.is_satisfied_by(None)
assert not constraint.is_satisfied_by("not a CV object")
| bsd-3-clause |
Adai0808/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 217 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
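# scale the noise so that norm(y) / norm(noise_coef * noise) == exp(snr / 20),
# i.e. a fixed signal-to-noise ratio on a dB-like scale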
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
nansencenter/nansat | nansat/mappers/mapper_obpg_l3.py | 1 | 8499 | # Name: mapper_obpg_l3
# Purpose: Mapping for L3 data from the OBPG web-site
# Authors: Anton Korosov
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
from __future__ import unicode_literals, absolute_import, division, print_function
import datetime
import os.path
import glob
import numpy as np
from nansat.utils import gdal, ogr
from nansat.vrt import VRT
from nansat.nsr import NSR
from nansat.exceptions import WrongMapperError
class Mapper(VRT):
''' Mapper for Level-3 Standard Mapped Image from
http://oceancolor.gsfc.nasa.gov'''
# detect wkv from metadata 'Parameter'
param2wkv = {'Chlorophyll a concentration': 'mass_concentration_of_chlorophyll_a_in_sea_water',
'Diffuse attenuation coefficient': 'volume_attenuation_coefficient_of_downwelling_'
'radiative_flux_in_sea_water',
'Remote sensing reflectance': 'surface_ratio_of_upwelling_radiance_emerging_from_'
'sea_water_to_downwelling_radiative_flux_in_air',
'CDOM Index': 'volume_absorption_coefficient_of_radiative_flux_in_sea_water_due_'
'to_dissolved_organic_matter',
'Sea Surface Salinity': 'sea_surface_salinity',
'Sea Surface Temperature': 'sea_surface_temperature',
'Particle backscatter at 443 nm': 'volume_backscattering_coefficient_of_radiative_flux_in_sea_water_due_to_suspended_particles',
'Chlorophyll a concentration, Garver-Siegel-Maritorena Model': 'mass_concentration_of_chlorophyll_a_in_sea_water',
'Photosynthetically Available Radiation': 'downwelling_photosynthetic_photon_radiance_in_sea_water',
'Instantaneous Photosynthetically Available Radiation': 'instantaneous_downwelling_photosynthetic_photon_radiance_in_sea_water',
}
def __init__(self, filename, gdalDataset, gdalMetadata, **kwargs):
''' OBPG L3 VRT '''
try:
assert 'Level-3 Standard Mapped Image' in gdalMetadata['Title']
except:
raise WrongMapperError
# get list of similar (same date) files in the directory
iDir, iFile = os.path.split(filename)
iFileName, iFileExt = os.path.splitext(iFile)
simFilesMask = os.path.join(iDir, iFileName)
simFiles = glob.glob(simFilesMask + iFileExt[0:6] + '*')
#print 'simFilesMask, simFiles', simFilesMask, simFiles
metaDict = []
for simFile in simFiles:
#print 'simFile', simFile
# open file, get metadata and get parameter name
simSupDataset = gdal.Open(simFile)
if simSupDataset is None:
# skip this similar file
#print 'No dataset: %s not a supported SMI file' % simFile
continue
# get subdatasets from the similar file
simSubDatasets = simSupDataset.GetSubDatasets()
if len(simSubDatasets) > 0:
for simSubDataset in simSubDatasets:
#print 'simSubDataset', simSubDataset
if 'l3m_data' in simSubDataset[1]:
# get SourceFilename from subdataset
tmpSourceFilename = simSubDataset[0]
break
else:
# get SourceFilename from dataset
tmpSourceFilename = simFile
# open subdataset with GDAL
#print 'tmpSourceFilename', tmpSourceFilename
tmpGdalDataset = gdal.Open(tmpSourceFilename)
try:
# get metadata, get 'Parameter'
tmpGdalMetadata = tmpGdalDataset.GetMetadata()
simParameter = tmpGdalMetadata['Parameter']
except:
                print('No parameter: %s is not a supported SMI file' % simFile)
continue
else:
# set params of the similar file
simSourceFilename = tmpSourceFilename
simGdalDataset = tmpGdalDataset
simGdalMetadata = tmpGdalMetadata
# get WKV from the similar file
#print 'simParameter', simParameter
for param in self.param2wkv:
#print 'param', param
if param in simParameter:
simWKV = self.param2wkv[param]
break
# generate entry to metaDict
metaEntry = {'src': {'SourceFilename': simSourceFilename,
'SourceBand': 1,
'ScaleRatio': float(simGdalMetadata['Slope']),
'ScaleOffset': float(simGdalMetadata['Intercept'])},
'dst': {'wkv': simWKV}}
# add wavelength and BandName
if ' at ' in simParameter and ' nm' in simParameter:
simWavelength = simParameter.split(' at ')[1].split(' nm')[0]
metaEntry['dst']['suffix'] = simWavelength
metaEntry['dst']['wavelength'] = simWavelength
# add band with Rrsw
metaEntry2 = None
if simWKV == 'surface_ratio_of_upwelling_radiance_emerging_from_sea_water_to_downwelling_radiative_flux_in_air':
metaEntry2 = {'src': [metaEntry['src']]}
metaEntry2['dst'] = {'wkv': 'surface_ratio_of_upwelling_radiance_emerging_from_sea_water_to_downwelling_radiative_flux_in_water',
'suffix': simWavelength,
'wavelength': simWavelength,
'PixelFunctionType': 'NormReflectanceToRemSensReflectance',
}
# append entry to metaDict
metaDict.append(metaEntry)
if metaEntry2 is not None:
metaDict.append(metaEntry2)
#get array with data and make 'mask'
a = simGdalDataset.ReadAsArray()
mask = np.zeros(a.shape, 'uint8') + 64
mask[a < -32000] = 1
self.band_vrts = {'mask': VRT(array=mask)}
metaDict.append(
{'src': {'SourceFilename': self.band_vrts['mask'].filename,
'SourceBand': 1},
'dst': {'name': 'mask'}})
# create empty VRT dataset with geolocation only
# print 'simGdalMetadata', simGdalMetadata
latitudeStep = float(simGdalMetadata.
get('Latitude Step',
simGdalMetadata.get('Latitude_Step', 1)))
longitudeStep = float(simGdalMetadata.
get('Longitude Step',
simGdalMetadata.get('Longitude_Step', 1)))
numberOfColumns = int(simGdalMetadata.
get('Number of Columns',
simGdalMetadata.get('Number_of_Columns', 1)))
numberOfLines = int(simGdalMetadata.
get('Number of Lines',
simGdalMetadata.get('Number_of_Lines', 1)))
#longitudeStep = float(simGdalMetadata['Longitude Step'])
# x_size, y_size, geo_transform, projection, gcps=None, gcp_projection='', **kwargs
self._init_from_dataset_params(numberOfColumns, numberOfLines,
(-180.0, longitudeStep, 0.0, 90.0, 0.0, -longitudeStep),
NSR().wkt)
# add bands with metadata and corresponding values to the empty VRT
self.create_bands(metaDict)
# Add valid time
startYear = int(simGdalMetadata.get('Start Year',
simGdalMetadata.
get('Start_Year', 1)))
startDay = int(simGdalMetadata.get('Start Day',
simGdalMetadata.
                                           get('Start_Day', 1)))
self.dataset.SetMetadataItem('time_coverage_start',
(datetime.datetime(startYear, 1, 1) +
datetime.timedelta(startDay)).isoformat())
| gpl-3.0 |
mlskit/astromlskit | FRONTEND/svmfront.py | 2 | 8728 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'svmui.ui'
#
# Created: Sun Mar 22 21:45:22 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from sklearn import svm
import numpy as np
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(253, 569)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 221, 61))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.groupBox_4 = QtGui.QGroupBox(Form)
self.groupBox_4.setGeometry(QtCore.QRect(10, 70, 231, 381))
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.label_2 = QtGui.QLabel(self.groupBox_4)
self.label_2.setGeometry(QtCore.QRect(60, 20, 81, 20))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.spinBox_2 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_2.setGeometry(QtCore.QRect(150, 20, 42, 22))
self.spinBox_2.setObjectName(_fromUtf8("spinBox_2"))
self.label_3 = QtGui.QLabel(self.groupBox_4)
self.label_3.setGeometry(QtCore.QRect(60, 50, 81, 20))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.spinBox_3 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_3.setGeometry(QtCore.QRect(150, 50, 42, 22))
self.spinBox_3.setObjectName(_fromUtf8("spinBox_3"))
self.label_4 = QtGui.QLabel(self.groupBox_4)
self.label_4.setGeometry(QtCore.QRect(60, 70, 81, 20))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.spinBox_4 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_4.setGeometry(QtCore.QRect(150, 70, 42, 22))
self.spinBox_4.setObjectName(_fromUtf8("spinBox_4"))
self.label_5 = QtGui.QLabel(self.groupBox_4)
self.label_5.setGeometry(QtCore.QRect(60, 100, 81, 20))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.spinBox_5 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_5.setGeometry(QtCore.QRect(150, 100, 42, 22))
self.spinBox_5.setObjectName(_fromUtf8("spinBox_5"))
self.label_6 = QtGui.QLabel(self.groupBox_4)
self.label_6.setGeometry(QtCore.QRect(60, 130, 81, 20))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.spinBox_6 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_6.setGeometry(QtCore.QRect(150, 130, 42, 22))
self.spinBox_6.setObjectName(_fromUtf8("spinBox_6"))
self.label_7 = QtGui.QLabel(self.groupBox_4)
self.label_7.setGeometry(QtCore.QRect(60, 160, 81, 20))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.spinBox_7 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_7.setGeometry(QtCore.QRect(150, 160, 42, 22))
self.spinBox_7.setObjectName(_fromUtf8("spinBox_7"))
self.label_8 = QtGui.QLabel(self.groupBox_4)
self.label_8.setGeometry(QtCore.QRect(60, 190, 81, 20))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.label_9 = QtGui.QLabel(self.groupBox_4)
self.label_9.setGeometry(QtCore.QRect(60, 220, 81, 20))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.spinBox_9 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_9.setGeometry(QtCore.QRect(150, 220, 42, 22))
self.spinBox_9.setObjectName(_fromUtf8("spinBox_9"))
self.spinBox_10 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_10.setGeometry(QtCore.QRect(150, 250, 42, 22))
self.spinBox_10.setObjectName(_fromUtf8("spinBox_10"))
self.label_10 = QtGui.QLabel(self.groupBox_4)
self.label_10.setGeometry(QtCore.QRect(60, 250, 81, 20))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.spinBox_11 = QtGui.QSpinBox(self.groupBox_4)
self.spinBox_11.setGeometry(QtCore.QRect(150, 280, 42, 22))
self.spinBox_11.setObjectName(_fromUtf8("spinBox_11"))
self.label_11 = QtGui.QLabel(self.groupBox_4)
self.label_11.setGeometry(QtCore.QRect(60, 280, 81, 20))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.checkBox_6 = QtGui.QCheckBox(self.groupBox_4)
self.checkBox_6.setGeometry(QtCore.QRect(60, 340, 181, 17))
self.checkBox_6.setObjectName(_fromUtf8("checkBox_6"))
self.checkBox_7 = QtGui.QCheckBox(self.groupBox_4)
self.checkBox_7.setGeometry(QtCore.QRect(60, 310, 181, 17))
self.checkBox_7.setObjectName(_fromUtf8("checkBox_7"))
self.comboBox = QtGui.QComboBox(self.groupBox_4)
self.comboBox.setGeometry(QtCore.QRect(150, 190, 69, 22))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(50, 470, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.takeinput)
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(50, 530, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton_3.clicked.connect(self.startsvm)
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(50, 500, 161, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.clicked.connect(self.taketest)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def startsvm(self):
clf=svm.SVC()
clf.fit(self.tr,self.classlabels)
for i in self.te:
print "test record:",i,"classlabel:",clf.predict(i)
def takeinput(self):
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
import pandas as pd
df = pd.read_csv(str(fname), sep=",")
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
self.classlabels=list(df[list(df)[2]])
print self.classlabels
self.tr=(zip(x,y))
def taketest(self):
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
import pandas as pd
df = pd.read_csv(str(fname), sep=",")
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
#print x,y
self.te=(zip(x,y))
#print (self.te)
#print len(np.array(self.te).shape)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Learner/Classifier Name", None))
self.lineEdit.setText(_translate("Form", "Support vector machines", None))
self.groupBox_4.setTitle(_translate("Form", "parameters", None))
self.label_2.setText(_translate("Form", "C", None))
self.label_3.setText(_translate("Form", "cache_size", None))
self.label_4.setText(_translate("Form", "class_weight", None))
self.label_5.setText(_translate("Form", "coeff0", None))
self.label_6.setText(_translate("Form", "degree", None))
self.label_7.setText(_translate("Form", "gamma", None))
self.label_8.setText(_translate("Form", "kernel", None))
self.label_9.setText(_translate("Form", "probability", None))
self.label_10.setText(_translate("Form", "tol", None))
self.label_11.setText(_translate("Form", "randomstate", None))
self.checkBox_6.setText(_translate("Form", "verbose", None))
self.checkBox_7.setText(_translate("Form", "shrinking", None))
self.comboBox.setItemText(0, _translate("Form", "rbf", None))
self.comboBox.setItemText(1, _translate("Form", "linear", None))
self.pushButton.setText(_translate("Form", "Train File", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
self.pushButton_2.setText(_translate("Form", "Test file", None))
| gpl-3.0 |
lisa-lab/pylearn2 | pylearn2/datasets/tests/test_mnist.py | 45 | 2298 | from pylearn2.datasets.mnist import MNIST
from pylearn2.space import IndexSpace, VectorSpace
import unittest
from pylearn2.testing.skip import skip_if_no_data
import numpy as np
class TestMNIST(unittest.TestCase):
def setUp(self):
skip_if_no_data()
self.train = MNIST(which_set='train')
self.test = MNIST(which_set='test')
def test_range(self):
"""Tests that the data spans [0,1]"""
for X in [self.train.X, self.test.X]:
assert X.min() == 0.0
assert X.max() == 1.0
def test_topo(self):
"""Tests that a topological batch has 4 dimensions"""
topo = self.train.get_batch_topo(1)
assert topo.ndim == 4
def test_topo_c01b(self):
"""
Tests that a topological batch with axes ('c',0,1,'b')
can be dimshuffled back to match the standard ('b',0,1,'c')
format.
"""
batch_size = 100
c01b_test = MNIST(which_set='test', axes=('c', 0, 1, 'b'))
c01b_X = c01b_test.X[0:batch_size, :]
c01b = c01b_test.get_topological_view(c01b_X)
assert c01b.shape == (1, 28, 28, batch_size)
b01c = c01b.transpose(3, 1, 2, 0)
b01c_X = self.test.X[0:batch_size, :]
assert c01b_X.shape == b01c_X.shape
assert np.all(c01b_X == b01c_X)
b01c_direct = self.test.get_topological_view(b01c_X)
assert b01c_direct.shape == b01c.shape
assert np.all(b01c_direct == b01c)
def test_y_index_space(self):
"""
Tests that requesting the targets to be in IndexSpace and iterating
over them works
"""
data_specs = (IndexSpace(max_labels=10, dim=1), 'targets')
it = self.test.iterator(mode='sequential',
data_specs=data_specs,
batch_size=100)
for y in it:
pass
def test_y_vector_space(self):
"""
Tests that requesting the targets to be in VectorSpace and iterating
over them works
"""
data_specs = (VectorSpace(dim=10), 'targets')
it = self.test.iterator(mode='sequential',
data_specs=data_specs,
batch_size=100)
for y in it:
pass
| bsd-3-clause |
ppries/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 12 | 9744 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops
class Strict1dCumsumTest(tf.test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = tf.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = tf.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = tf.constant([3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = tf.constant([3], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = tf.constant([1, 3, 6], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(tf.test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = tf.constant([], shape=[0], dtype=tf.bool)
scores = tf.constant([], shape=[0], dtype=tf.float32)
score_range = [0, 1.]
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels, scores,
score_range)
tf.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = tf.placeholder(tf.bool, shape=[num_records])
scores = tf.placeholder(tf.float32, shape=[num_records])
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels,
scores,
score_range,
nbins=nbins)
tf.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
  # also checked this against sklearn.metrics.roc_auc_score.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
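# Added illustration (hypothetical helper, not part of the original tests):
# empirically sanity-check synthetic_data's AUC control; assumes scikit-learn
# is installed.
def _demo_synthetic_auc(desired_auc=0.75, num_records=100000):
  from sklearn.metrics import roc_auc_score
  rng = np.random.RandomState(0)
  labels, scores = synthetic_data(desired_auc, [0, 1.], num_records, rng, 0.5)
  print('requested AUC: %.3f  empirical AUC: %.3f'
        % (desired_auc, roc_auc_score(labels, scores)))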
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
SU-ECE-17-7/hotspotter | _graveyard/oldhotspotter/ideas/matching_graph.py | 2 | 5495 | N = 2000 #min_image_threshold
Beta = 1.5 #
def matchgraph(hs):
cm, vm = hs.get_managers("cm","vm")
# Load Images
cx_list = cm.all_valid_cxs()
num_cx = len(cx_list) # number of chips (> 10,000)
# Train Default Bag-of-Words Model
V = vm.train_model(cx_list) # The Set of Bag-of-Words Vectors (normalized, tf-idf preweighted)
if len(V) < 2:
raise Exception('Cannot build matchgraph')
# Preallocate Intermediate Variables
dims = len(V[0]) # dimensionality of bag of words (>1,000,000,000)
    W = np.eye(len(cx_list))  # The learned weighting of word histogram similarity
    Sim = np.zeros((num_cx, num_cx), dtype=np.uint8)
    svm_train_examples = np.zeros((num_cx, num_cx, dims), dtype=float)
# Perform Batch Query
    for x_a, qvec in enumerate(V):  # qvec = query vector
        for x_b, dvec in enumerate(V):  # dvec = database vector
            svm_train_examples[x_a, x_b, :] = qvec * dvec  # LEARN!
            Sim[x_a, x_b] = np.transpose(qvec).dot(W).dot(dvec)
        tops_x = Sim[x_a, :].argsort()[::-1]  # indexes sorted best-first
        spatial_rerank(Sim, tops_x)
    # Train SVM
    def hinge_loss(y, x, w):
        val = 1 - y * np.dot(w, x)
        return max(0, val)
    def svm_cost(y, x, w, C):
        # C is the regularization variable; the pair selection below is
        # pseudocode for sampling chip pairs without replacement
        training_pairs = rand.select(cx_list, 'pairwise', replacement=False)
        return (.5 * np.dot(w, w) +
                C * sum(hinge_loss(y[a, b], x[a, b], w)
                        for (a, b) in training_pairs) ** 2)
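    # Added sketch (hypothetical helper): the standard linear-SVM objective
    # uses an unsquared sum of hinge losses; `pairs` is any iterable of
    # (a, b) index pairs with labels y[a, b] in {-1, +1}.
    def svm_cost_standard(y, x, w, C, pairs):
        return 0.5 * np.dot(w, w) + C * sum(
            hinge_loss(y[a, b], x[a, b], w) for (a, b) in pairs)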
    C = param(1, min=0, max=np.inf)
kernel = param('''Specifies the kernel type to be used in the algorithm.
It must be one of linear, poly, rbf, sigmoid,
precomputed or a callable. If none is given, rbf will
be used. If a callable is given it is used to precompute
the kernel matrix.''',
'linear', choices=['linear','poly','rbf','sigmoid','precomputed'])
degree = param('''Degree of kernel function. It is significant only in
poly and sigmoid''',
3, sigifeq=(kernel,['poly','sigmoid']))
gamma = param('''Kernel coefficient for rbf and poly. If gamma is
0.0 then 1/n_features will be used instead.''',
0, sigifeq=(kernel,['poly', rbf]))
coef0 = param('''Independent term in kernel function. It is only
significant in poly and sigmoid.''',
0, sigifeq=(kernel,['poly','sigmoid']))
probability = param('''Whether to enable probability estimates. This
must be enabled prior to calling predict_proba.''',
False)
tol = param(''' Tolerance for stopping criterion.''', 1e-3)
shrinking = param(True, 'use shrinking heuristic')
cache_size = param('', 0)
class_weight = param('''Set the parameter C of class i to
class_weight[i]*C for SVC. If not given, all
classes are supposed to have weight one. The auto
mode uses the values of y to automatically adjust
weights inversely proportional to class
frequencies.''',
'auto', {dict, 'auto'})
max_iter = param('''Hard limit on iterations within solver, or -1 for
no limit.''', -1)
import numpy as np
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
from sklearn.svm import SVC
clf = SVC()
clf.fit(X, y)
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
shrinking=True, tol=0.001, verbose=False)
print(clf.predict([[-0.8, -1]]))
'''
http://scikit-learn.org/dev/modules/generated/sklearn.svm.SVC.html
decision_function(X) Distance of the samples X to the separating hyperplane.
fit(X, y[, sample_weight]) Fit the SVM model according to the given training data.
get_params([deep]) Get parameters for the estimator
predict(X) Perform classification on samples in X.
predict_log_proba(X) Compute log probabilities of possible outcomes for samples in X.
predict_proba(X) Compute probabilities of possible outcomes for samples in X.
score(X, y) Returns the mean accuracy on the given test data and labels.
set_params(**params) Set the parameters of the estimator.
'''
    SVC(C=1, kernel=kernel())
# 3.2 Iterative Learning and Matching
# w = with vanilla tf-idf Rank[a,b]
# While: True
# Compute pairwise similarity of all images using weights w
# Foreach image rerank a shortlist of its most similar matches
# if OPTION_1:
# train_data, train_labels =
# Learn w using Linear SVM
#
    # Two Strategies:
# Match images with similarity above some threshold
# Match images to their #1 Rank
    w = minimize( )  # placeholder: the optimizer call is left elided in the original sketch
# Get Pairwise Matches
qres_list = vm.batch_query(cx_list, method="TFIDF")
    # qres_list.matching  (incomplete attribute access in the original sketch)
    MatchingGraph = np.zeros((2, 3))  # placeholder shape, per the original np.matrix(2, 3)
for res in qres_list:
qcx = res.qcx
        # fm. (line truncated in the original sketch)
| apache-2.0 |
Adai0808/scikit-learn | sklearn/random_projection.py | 206 | 22098 | # -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, NotFittedError
from .utils import DataDimensionalityWarning
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
distance between two points by a factor (1 +- eps) in an euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
""" Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
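    Examples
    --------
    A minimal shape check (editor's sketch; entries are drawn from
    N(0, 1.0 / n_components), so only the deterministic shape is shown):
    >>> components = gaussian_random_matrix(10, 1000, random_state=0)
    >>> components.shape
    (10, 1000)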
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
    components : numpy array or CSR matrix with shape [n_components, n_features]
        The generated sparse random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
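    Examples
    --------
    A minimal sketch (editor's addition; the exact sparsity pattern is
    seed-dependent, so only the deterministic shape is shown):
    >>> components = sparse_random_matrix(10, 1000, density=0.01,
    ...                                   random_state=0)
    >>> components.shape
    (10, 1000)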
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
self.components_ = None
self.n_components_ = None
@abstractmethod
    def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
                    % self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix '
                     'does not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.components_ is None:
raise NotFittedError('No random projection matrix had been fit.')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection:'
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components,
        as it makes no assumption about the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
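    Examples
    --------
    A minimal usage sketch (editor's addition; the target dimension below
    follows from the Johnson-Lindenstrauss bound for 25 samples at the
    default eps=0.1):
    >>> import numpy as np
    >>> from sklearn.random_projection import GaussianRandomProjection
    >>> X = np.random.RandomState(42).rand(25, 3000)
    >>> transformer = GaussianRandomProjection(random_state=42)
    >>> X_new = transformer.fit_transform(X)
    >>> X_new.shape
    (25, 2759)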
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
        It should be noted that the Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components,
        as it makes no assumption about the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
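    Examples
    --------
    A minimal usage sketch (editor's addition; with density='auto' the
    fitted density is 1 / sqrt(n_features)):
    >>> import numpy as np
    >>> from sklearn.random_projection import SparseRandomProjection
    >>> X = np.random.RandomState(42).rand(25, 3000)
    >>> transformer = SparseRandomProjection(random_state=42)
    >>> X_new = transformer.fit_transform(X)
    >>> X_new.shape
    (25, 2759)
    >>> transformer.density_ == 1 / np.sqrt(3000)
    True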
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
self.density_ = None
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
| bsd-3-clause |
KDB2/OpenReliability | veusz/document/loader.py | 2 | 8303 | # Copyright (C) 2014 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
# note: no future statements here for backward compatibility
import sys
import os.path
import traceback
import io
import numpy as N
from .. import qtall as qt4
from .. import setting
from .. import utils
from ..compat import cexec, cstr, cstrerror, cbytes, cexceptionuser
from .commandinterface import CommandInterface
from . import datasets
# loaded lazily
h5py = None
def _(text, disambiguation=None, context='DocumentLoader'):
"""Translate text."""
return qt4.QCoreApplication.translate(context, text, disambiguation)
class LoadError(RuntimeError):
"""Error when loading document."""
def __init__(self, text, backtrace=''):
RuntimeError.__init__(self, text)
self.backtrace = backtrace
def bconv(s):
"""Sometimes h5py returns non-unicode strings,
so hack to decode strings if in wrong format."""
if isinstance(s, cbytes):
return s.decode('utf-8')
return s
def executeScript(thedoc, filename, script, callbackunsafe=None):
"""Execute a script for the document.
This handles setting up the environment and checking for unsafe
commands in the execution.
filename: filename to supply in __filename__
script: text to execute
callbackunsafe: should be set to a function to ask the user whether it is
ok to execute any unsafe commands found. Return True if ok.
    The caller should wipe the document before calling this.
"""
def genexception(exc):
info = sys.exc_info()
backtrace = ''.join(traceback.format_exception(*info))
return LoadError(cexceptionuser(exc), backtrace=backtrace)
# compile script and check for security (if reqd)
unsafe = [setting.transient_settings['unsafe_mode']]
while True:
try:
compiled = utils.compileChecked(
script, mode='exec', filename=filename,
ignoresecurity=unsafe[0])
break
except utils.SafeEvalException:
if callbackunsafe is None or not callbackunsafe():
raise LoadError(_("Unsafe command in script"))
# repeat with unsafe mode switched on
unsafe[0] = True
except Exception as e:
raise genexception(e)
env = thedoc.evaluate.context.copy()
interface = CommandInterface(thedoc)
# allow safe commands as-is
for cmd in interface.safe_commands:
env[cmd] = getattr(interface, cmd)
# define root node
env['Root'] = interface.Root
# wrap unsafe calls with a function to check whether ok
def _unsafecaller(func):
def wrapped(*args, **argsk):
if not unsafe[0]:
if callbackunsafe is None or not callbackunsafe():
raise LoadError(_("Unsafe command in script"))
unsafe[0] = True
func(*args, **argsk)
return wrapped
for name in interface.unsafe_commands:
env[name] = _unsafecaller(getattr(interface, name))
# get ready for loading document
env['__file__'] = filename
# allow import to happen relative to loaded file
interface.AddImportPath( os.path.dirname(os.path.abspath(filename)) )
with thedoc.suspend():
try:
# actually run script text
cexec(compiled, env)
except LoadError:
raise
except Exception as e:
raise genexception(e)
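# A minimal usage sketch for executeScript (editor's note, kept as comments
# so module import behavior is unchanged; assumes `thedoc` is an existing
# Document and that unsafe commands should simply be refused):
#
#     with io.open('saved.vsz', 'rU', encoding='utf-8') as f:
#         executeScript(thedoc, 'saved.vsz', f.read(),
#                       callbackunsafe=lambda: False)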
def loadHDF5Dataset1D(datagrp):
args = {}
    # use set intersection rather than membership tests to work around
    # an error that some h5py versions raise on expressions like:
    # 'perr' in datagrp
parts = set(datagrp) & set(('data', 'serr', 'perr', 'nerr'))
for v in parts:
args[v] = N.array(datagrp[v])
return datasets.Dataset(**args)
def loadHDF5Dataset2D(datagrp):
args = {}
parts = set(datagrp) & set(
('data', 'xcent', 'xedge', 'ycent', 'yedge', 'xrange', 'yrange'))
for v in parts:
args[v] = N.array(datagrp[v])
return datasets.Dataset2D(**args)
def loadHDF5DatasetDate(datagrp):
return datasets.DatasetDateTime(data=datagrp['data'])
def loadHDF5DatasetText(datagrp):
data = [d.decode('utf-8') for d in datagrp['data']]
return datasets.DatasetText(data=data)
def loadHDF5Datasets(thedoc, hdffile):
"""Load all the Veusz datasets in the HDF5 file."""
alldatagrp = hdffile['Veusz']['Data']
datafuncs = {
'1d': loadHDF5Dataset1D,
'2d': loadHDF5Dataset2D,
'date': loadHDF5DatasetDate,
'text': loadHDF5DatasetText,
}
for name in alldatagrp:
datagrp = alldatagrp[name]
datatype = bconv(datagrp.attrs['vsz_datatype'])
veuszname = utils.unescapeHDFDataName(bconv(name))
dataset = datafuncs[datatype](datagrp)
thedoc.setData(veuszname, dataset)
def tagHDF5Datasets(thedoc, hdffile):
"""Tag datasets loaded from HDF5 file."""
tags = hdffile['Veusz']['Document']['Tags']
for tag in tags:
vsztag = bconv(tag)
datasets = tags[tag]
for name in datasets:
dsname = name.decode('utf-8')
thedoc.data[dsname].tags.add(vsztag)
def loadHDF5Doc(thedoc, filename, callbackunsafe=None):
"""Load an HDF5 of the name given."""
try:
global h5py
import h5py
except ImportError:
raise LoadError(_("No HDF5 support as h5py module is missing"))
with thedoc.suspend():
thedoc.wipe()
hdffile = h5py.File(filename, 'r')
try:
vszformat = hdffile['Veusz'].attrs['vsz_format']
vszversion = hdffile['Veusz'].attrs['vsz_version']
except KeyError:
raise LoadError(_("HDF5 file '%s' is not a Veusz saved document") %
os.path.basename(filename))
maxformat = 1
if vszformat > maxformat:
raise LoadError(_("This document version (%i) is not supported. "
"It was written by Veusz %s.\n"
"This Veusz only supports document version %i." %
(vszformat, vszversion, maxformat)))
# load document
script = hdffile['Veusz']['Document']['document'][0].decode('utf-8')
executeScript(thedoc, filename, script, callbackunsafe=callbackunsafe)
# then load datasets
loadHDF5Datasets(thedoc, hdffile)
# and then tag
tagHDF5Datasets(thedoc, hdffile)
hdffile.close()
def loadDocument(thedoc, filename, mode='vsz', callbackunsafe=None):
"""Load document from file.
mode is 'vsz' or 'hdf5'
"""
if mode == 'vsz':
try:
with io.open(filename, 'rU', encoding='utf-8') as f:
script = f.read()
except EnvironmentError as e:
raise LoadError( _("Cannot open document '%s'\n\n%s") %
(os.path.basename(filename), cstrerror(e)) )
except UnicodeDecodeError:
raise LoadError( _("File '%s' is not a valid Veusz document") %
os.path.basename(filename) )
thedoc.wipe()
executeScript(thedoc, filename, script, callbackunsafe=callbackunsafe)
elif mode == 'hdf5':
loadHDF5Doc(thedoc, filename, callbackunsafe=callbackunsafe)
else:
raise RuntimeError('Invalid load mode')
thedoc.setModified(False)
thedoc.clearHistory()
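# A minimal usage sketch for loadDocument (editor's note, comments only;
# the Document import path is an assumption based on this package's layout):
#
#     from veusz.document import Document
#     doc = Document()
#     loadDocument(doc, 'example.vsz', mode='vsz',
#                  callbackunsafe=lambda: False)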
| gpl-2.0 |
Adai0808/scikit-learn | examples/svm/plot_iris.py | 223 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
sunny256/linux | tools/power/pm-graph/analyze_suspend.py | 76 | 182868 | #!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Links:
# Home Page
# https://01.org/suspendresume
# Source repo
# https://github.com/01org/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
import ConfigParser
from threading import Thread
from subprocess import call, Popen, PIPE
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '4.7'
ansi = False
verbose = False
testlog = True
dmesglog = False
ftracelog = False
mindevlen = 0.0
mincglen = 0.0
cgphase = ''
cgtest = -1
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
cpucount = 0
memtotal = 204800
srgap = 0
cgexp = False
testdir = ''
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
suspendmode = 'mem'
memmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
sysstamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = 'output.html'
embedded = False
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
stamp = 0
execcount = 1
x2delay = 0
usecallgraph = False
usetraceevents = False
usetraceeventsonly = False
usetracemarkers = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
mixedphaseheight = True
devprops = dict()
predelay = 0
postdelay = 0
procexecfmt = 'ps - (?P<ps>.*)$'
devpropfmt = '# Device Properties: .*'
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
tracefuncs = {
'sys_sync': dict(),
'pm_prepare_console': dict(),
'pm_notifier_call_chain': dict(),
'freeze_processes': dict(),
'freeze_kernel_threads': dict(),
'pm_restrict_gfp_mask': dict(),
'acpi_suspend_begin': dict(),
'suspend_console': dict(),
'acpi_pm_prepare': dict(),
'syscore_suspend': dict(),
'arch_enable_nonboot_cpus_end': dict(),
'syscore_resume': dict(),
'acpi_pm_finish': dict(),
'resume_console': dict(),
'acpi_pm_end': dict(),
'pm_restore_gfp_mask': dict(),
'thaw_processes': dict(),
'pm_restore_console': dict(),
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout_uninterruptible': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
# ACPI
'acpi_resume_power_resources': dict(),
'acpi_ps_parse_aml': dict(),
# filesystem
'ext4_sync_fs': dict(),
# 80211
'iwlagn_mac_start': dict(),
'iwlagn_alloc_bcast_station': dict(),
'iwl_trans_pcie_start_hw': dict(),
'iwl_trans_pcie_start_fw': dict(),
'iwl_run_init_ucode': dict(),
'iwl_load_ucode_wait_alive': dict(),
'iwl_alive_start': dict(),
'iwlagn_mac_stop': dict(),
'iwlagn_mac_suspend': dict(),
'iwlagn_mac_resume': dict(),
'iwlagn_mac_add_interface': dict(),
'iwlagn_mac_remove_interface': dict(),
'iwlagn_mac_change_interface': dict(),
'iwlagn_mac_config': dict(),
'iwlagn_configure_filter': dict(),
'iwlagn_mac_hw_scan': dict(),
'iwlagn_bss_info_changed': dict(),
'iwlagn_mac_channel_switch': dict(),
'iwlagn_mac_flush': dict(),
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': dict(),
'i915_restore_state': dict(),
'intel_opregion_setup': dict(),
'g4x_pre_enable_dp': dict(),
'vlv_pre_enable_dp': dict(),
'chv_pre_enable_dp': dict(),
'g4x_enable_dp': dict(),
'vlv_enable_dp': dict(),
'intel_hpd_init': dict(),
'intel_opregion_register': dict(),
'intel_dp_detect': dict(),
'intel_hdmi_detect': dict(),
'intel_opregion_init': dict(),
'intel_fbdev_set_suspend': dict(),
}
kprobes = dict()
timeformat = '%.3f'
def __init__(self):
# if this is a phoronix test run, set some default options
if('LOG_FILE' in os.environ and 'TEST_RESULTS_IDENTIFIER' in os.environ):
self.embedded = True
self.dmesglog = self.ftracelog = True
self.htmlfile = os.environ['LOG_FILE']
self.archargs = 'args_'+platform.machine()
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
self.ansi = True
self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
doError('This command requires sysfs mount and root access')
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
doError('This command must be run as root')
return False
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = self.hostname
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
p = c = m = b = ''
if 'baseboard-manufacturer' in info:
m = info['baseboard-manufacturer']
elif 'system-manufacturer' in info:
m = info['system-manufacturer']
if 'baseboard-product-name' in info:
p = info['baseboard-product-name']
elif 'system-product-name' in info:
p = info['system-product-name']
if 'processor-version' in info:
c = info['processor-version']
if 'bios-version' in info:
b = info['bios-version']
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | numcpu:%d | memsz:%d' % \
(m, p, c, b, self.cpucount, self.memtotal)
def printSystemInfo(self):
self.rootCheck(True)
out = dmidecode(self.mempath, True)
fmt = '%-24s: %s'
for name in sorted(out):
print fmt % (name, out[name])
print fmt % ('cpucount', ('%d' % self.cpucount))
print fmt % ('memtotal', ('%d kB' % self.memtotal))
def cpuInfo(self):
self.cpucount = 0
fp = open('/proc/cpuinfo', 'r')
for line in fp:
if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
fp.close()
fp = open('/proc/meminfo', 'r')
for line in fp:
m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
break
fp.close()
def initTestOutput(self, name):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = string.split(v)[2]
fmt = name+'-%m%d%y-%H%M%S'
testtime = datetime.now().strftime(fmt)
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
if(self.embedded):
self.dmesgfile = \
'/tmp/'+testtime+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
'/tmp/'+testtime+'_'+self.suspendmode+'_ftrace.txt'
return
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
def setDeviceFilter(self, value):
self.devicefilter = []
if value:
value = value.split(',')
for i in value:
self.devicefilter.append(i.strip())
def rtcWakeAlarmOn(self):
call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
outD = open(self.rtcpath+'/date', 'r').read().strip()
outT = open(self.rtcpath+'/time', 'r').read().strip()
mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
if(mD and mT):
# get the current time from hardware
utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
dt = datetime(\
int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
nowtime = int(dt.strftime('%s')) + utcoffset
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
def rtcWakeAlarmOff(self):
call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
def initdmesg(self):
# get the latest time stamp from the dmesg log
fp = Popen('dmesg', stdout=PIPE).stdout
ktime = '0'
for line in fp:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
fp.close()
self.dmesgstart = float(ktime)
def getdmesg(self):
# store all new dmesg lines since initdmesg was called
fp = Popen('dmesg', stdout=PIPE).stdout
op = open(self.dmesgfile, 'a')
for line in fp:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if ktime > self.dmesgstart:
op.write(line)
fp.close()
op.close()
def addFtraceFilterFunctions(self, file):
fp = open(file)
list = fp.read().split('\n')
fp.close()
for i in list:
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
def getFtraceFilterFunctions(self, current):
self.rootCheck(True)
if not current:
call('cat '+self.tpath+'available_filter_functions', shell=True)
return
fp = open(self.tpath+'available_filter_functions')
master = fp.read().split('\n')
fp.close()
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
i = self.tracefuncs[i]['func']
if i in master:
print i
else:
print self.colorText(i)
def setFtraceFilterFunctions(self, list):
fp = open(self.tpath+'available_filter_functions')
master = fp.read().split('\n')
fp.close()
flist = ''
for i in list:
if i not in master:
continue
if ' [' in i:
flist += i.split(' ')[0]+'\n'
else:
flist += i+'\n'
fp = open(self.tpath+'set_graph_function', 'w')
fp.write(flist)
fp.close()
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
def defaultKprobe(self, name, kdata):
k = kdata
for field in ['name', 'format', 'func']:
if field not in k:
k[field] = name
if self.archargs in k:
k['args'] = k[self.archargs]
else:
k['args'] = dict()
k['format'] = name
self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
def kprobeDisplayName(self, name, dataraw):
if name not in self.kprobes:
self.basicKprobe(name)
data = ''
quote=0
		# first remove any spaces inside quotes, and the quotes themselves
for c in dataraw:
if c == '"':
quote = (quote + 1) % 2
if quote and c == ' ':
data += '_'
elif c != '"':
data += c
fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
arglist = dict()
# now process the args
for arg in sorted(args):
arglist[arg] = ''
m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
m = re.match('.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
out = out.replace(' ', '_').replace('"', '')
return out
def kprobeText(self, kname, kprobe):
name = fmt = func = kname
args = dict()
if 'name' in kprobe:
name = kprobe['name']
if 'format' in kprobe:
fmt = kprobe['format']
if 'func' in kprobe:
func = kprobe['func']
if self.archargs in kprobe:
args = kprobe[self.archargs]
if 'args' in kprobe:
args = kprobe['args']
if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
val = 'p:%s_cal %s' % (name, func)
for i in sorted(args):
val += ' %s=%s' % (i, args[i])
val += '\nr:%s_ret %s $retval\n' % (name, func)
return val
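	# For example (editor's sketch), kprobeText('msleep',
	# {'func': 'msleep', 'args': {'time': '%di:s32'}}) returns the two
	# kprobe_events definitions:
	#   p:msleep_cal msleep time=%di:s32
	#   r:msleep_ret msleep $retval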
def addKprobes(self, output=False):
if len(self.kprobes) < 1:
return
if output:
print(' kprobe functions in this kernel:')
# first test each kprobe
rejects = []
# sort kprobes: trace, ub-dev, custom, dev
kpl = [[], [], [], []]
for name in sorted(self.kprobes):
res = self.colorText('YES', 32)
if not self.testKprobe(name, self.kprobes[name]):
res = self.colorText('NO')
rejects.append(name)
else:
if name in self.tracefuncs:
kpl[0].append(name)
elif name in self.dev_tracefuncs:
if 'ub' in self.dev_tracefuncs[name]:
kpl[1].append(name)
else:
kpl[3].append(name)
else:
kpl[2].append(name)
if output:
print(' %s: %s' % (name, res))
kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
# remove all failed ones from the list
for name in rejects:
self.kprobes.pop(name)
# set the kprobes all at once
self.fsetVal('', 'kprobe_events')
kprobeevents = ''
for kp in kplist:
kprobeevents += self.kprobeText(kp, self.kprobes[kp])
self.fsetVal(kprobeevents, 'kprobe_events')
# verify that the kprobes were set as ordered
check = self.fgetVal('kprobe_events')
linesout = len(kprobeevents.split('\n')) - 1
linesack = len(check.split('\n')) - 1
if output:
res = '%d/%d' % (linesack, linesout)
if linesack < linesout:
res = self.colorText(res, 31)
else:
res = self.colorText(res, 32)
print(' working kprobe functions enabled: %s' % res)
self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
kprobeevents = self.kprobeText(kname, kprobe)
if not kprobeevents:
return False
try:
self.fsetVal(kprobeevents, 'kprobe_events')
check = self.fgetVal('kprobe_events')
except:
return False
linesout = len(kprobeevents.split('\n'))
linesack = len(check.split('\n'))
if linesack < linesout:
return False
return True
def fsetVal(self, val, path, mode='w'):
file = self.tpath+path
if not os.path.exists(file):
return False
try:
fp = open(file, mode, 0)
fp.write(val)
fp.flush()
fp.close()
except:
return False
return True
def fgetVal(self, path):
file = self.tpath+path
res = ''
if not os.path.exists(file):
return res
try:
fp = open(file, 'r')
res = fp.read()
fp.close()
except:
pass
return res
def cleanupFtrace(self):
if(self.usecallgraph or self.usetraceevents):
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
def isCallgraphFunc(self, name):
if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
return True
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
f = self.tracefuncs[i]['func']
else:
f = i
if name == f:
return True
return False
def initFtrace(self, testing=False):
print('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
self.fsetVal('nop', 'current_tracer')
# set trace buffer to a huge value
if self.usecallgraph or self.usedevsrc:
tgtsize = min(self.memtotal / 2, 2*1024*1024)
maxbuf = '%d' % (tgtsize / max(1, self.cpucount))
if self.cpucount < 1 or not self.fsetVal(maxbuf, 'buffer_size_kb'):
self.fsetVal('131072', 'buffer_size_kb')
else:
self.fsetVal('16384', 'buffer_size_kb')
# go no further if this is just a status check
if testing:
return
# initialize the callgraph trace
if(self.usecallgraph):
# set trace type
self.fsetVal('function_graph', 'current_tracer')
self.fsetVal('', 'set_ftrace_filter')
# set trace format options
self.fsetVal('print-parent', 'trace_options')
self.fsetVal('funcgraph-abstime', 'trace_options')
self.fsetVal('funcgraph-cpu', 'trace_options')
self.fsetVal('funcgraph-duration', 'trace_options')
self.fsetVal('funcgraph-proc', 'trace_options')
self.fsetVal('funcgraph-tail', 'trace_options')
self.fsetVal('nofuncgraph-overhead', 'trace_options')
self.fsetVal('context-info', 'trace_options')
self.fsetVal('graph-time', 'trace_options')
self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
cf = ['dpm_run_callback']
if(self.usetraceeventsonly):
cf += ['dpm_prepare', 'dpm_complete']
for fn in self.tracefuncs:
if 'func' in self.tracefuncs[fn]:
cf.append(self.tracefuncs[fn]['func'])
else:
cf.append(fn)
self.setFtraceFilterFunctions(cf)
# initialize the kprobe trace
elif self.usekprobes:
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
if self.usedevsrc:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
print('INITIALIZING KPROBES...')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
events = iter(self.traceevents)
for e in events:
self.fsetVal('1', 'events/power/'+e+'/enable')
# clear the trace buffer
self.fsetVal('', 'trace')
def verifyFtrace(self):
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = self.tpath
if(self.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def verifyKprobes(self):
# files needed for kprobes to work
files = ['kprobe_events', 'events']
tp = self.tpath
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def colorText(self, str, color=31):
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
def writeDatafileHeader(self, filename, fwdata=[]):
fp = open(filename, 'w')
fp.write(self.teststamp+'\n')
fp.write(self.sysstamp+'\n')
if(self.suspendmode == 'mem' or self.suspendmode == 'command'):
for fw in fwdata:
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
fp.close()
sysvals = SystemValues()
suspendmodename = {
'freeze': 'Freeze (S0)',
'standby': 'Standby (S1)',
'mem': 'Suspend (S3)',
'disk': 'Hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
class DevProps:
syspath = ''
altname = ''
async = True
xtraclass = ''
xtrainfo = ''
def out(self, dev):
return '%s,%s,%d;' % (dev, self.altname, self.async)
def debug(self, dev):
print '%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.async)
def altName(self, dev):
if not self.altname or self.altname == dev:
return dev
return '%s [%s]' % (self.altname, dev)
def xtraClass(self):
if self.xtraclass:
return ' '+self.xtraclass
if not self.async:
return ' sync'
return ''
def xtraInfo(self):
if self.xtraclass:
return ' '+self.xtraclass
if self.async:
return ' async_device'
return ' sync_device'
# Class: DeviceNode
# Description:
#	 A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
name = ''
children = 0
depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
#	 The primary container for suspend/resume test data. There is one for
#	 each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
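# For example (editor's sketch), the start time of the 'suspend' phase of
# a parsed Data object named data is read as:
#	 data.dmesg['suspend']['start']
#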
class Data:
dmesg = {} # root data structure
phases = [] # ordered list of phases
start = 0.0 # test start
end = 0.0 # test end
tSuspended = 0.0 # low-level suspend start
tResumed = 0.0 # low-level resume start
tKernSus = 0.0 # kernel level suspend start
tKernRes = 0.0 # kernel level resume end
tLow = 0.0 # time spent in low-level suspend (standby/freeze)
fwValid = False # is firmware data available
fwSuspend = 0 # time spent in firmware suspend
fwResume = 0 # time spent in firmware resume
dmesgtext = [] # dmesg text file in memory
pstl = 0 # process timeline
testnumber = 0
idstr = ''
html_device_id = 0
stamp = 0
outfile = ''
devpids = []
kerror = False
def __init__(self, num):
idchar = 'abcdefghij'
self.pstl = dict()
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = []
self.phases = []
self.dmesg = { # fixed list of 10 phases
'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#CCFFCC', 'order': 0},
'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#88FF88', 'order': 1},
'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#00AA00', 'order': 2},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#008888', 'order': 3},
'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#0000FF', 'order': 4},
'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF0000', 'order': 5},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF9900', 'order': 6},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFCC00', 'order': 7},
'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFF88', 'order': 8},
'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFFCC', 'order': 9}
}
self.phases = self.sortedPhases()
self.devicegroups = []
for phase in self.phases:
self.devicegroups.append([phase])
self.errorinfo = {'suspend':[],'resume':[]}
def extractErrorInfo(self, dmesg):
error = ''
tm = 0.0
for i in range(len(dmesg)):
if 'Call Trace:' in dmesg[i]:
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) .*', dmesg[i])
if not m:
continue
tm = float(m.group('ktime'))
if tm < self.start or tm > self.end:
continue
for j in range(i-10, i+1):
error += dmesg[j]
continue
if error:
m = re.match('[ \t]*\[ *[0-9\.]*\] \[\<[0-9a-fA-F]*\>\] .*', dmesg[i])
if m:
error += dmesg[i]
else:
if tm < self.tSuspended:
dir = 'suspend'
else:
dir = 'resume'
error = error.replace('<', '<').replace('>', '>')
vprint('kernel error found in %s at %f' % (dir, tm))
self.errorinfo[dir].append((tm, error))
self.kerror = True
error = ''
def setStart(self, time):
self.start = time
def setEnd(self, time):
self.end = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time < d['end']):
return False
return True
def sourcePhase(self, start):
for phase in self.phases:
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
return 'resume_complete'
def sourceDevice(self, phaselist, start, end, pid, type):
tgtdev = ''
for phase in phaselist:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
# pid must match
if dev['pid'] != pid:
continue
devS = dev['start']
devE = dev['end']
if type == 'device':
# device target event is entirely inside the source boundary
if(start < devS or start >= devE or end <= devS or end > devE):
continue
elif type == 'thread':
# thread target event will expand the source boundary
if start < devS:
dev['start'] = start
if end > devE:
dev['end'] = end
tgtdev = dev
break
return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
# try to place the call in a device
tgtdev = self.sourceDevice(self.phases, start, end, pid, 'device')
# calls with device pids that occur outside device bounds are dropped
# TODO: include these somehow
if not tgtdev and pid in self.devpids:
return False
# try to place the call in a thread
if not tgtdev:
tgtdev = self.sourceDevice(self.phases, start, end, pid, 'thread')
# create new thread blocks, expand as new calls are found
if not tgtdev:
if proc == '<...>':
threadname = 'kthread-%d' % (pid)
else:
threadname = '%s-%d' % (proc, pid)
tgtphase = self.sourcePhase(start)
self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
# this should not happen
if not tgtdev:
vprint('[%f - %f] %s-%d %s %s %s' % \
(start, end, proc, pid, kprobename, cdata, rdata))
return False
# place the call data inside the src element of the tgtdev
if('src' not in tgtdev):
tgtdev['src'] = []
dtf = sysvals.dev_tracefuncs
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
title = cdata+' '+rdata
mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
m = re.match(mstr, title)
if m:
c = m.group('caller')
a = m.group('args').strip()
r = m.group('ret')
if len(r) > 6:
r = ''
else:
r = 'ret=%s ' % r
if ubiquitous and c in dtf and 'ub' in dtf[c]:
return False
color = sysvals.kprobeColor(kprobename)
e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
tgtdev['src'].append(e)
return True
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
for phase in self.phases:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if dev['end'] > self.end:
devlist.append(dev)
return devlist
def mergeOverlapDevices(self, devlist):
# merge any devices that overlap devlist
for dev in devlist:
devname = dev['name']
for phase in self.phases:
list = self.dmesg[phase]['list']
if devname not in list:
continue
tdev = list[devname]
o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
if o <= 0:
continue
dev['end'] = tdev['end']
if 'src' not in dev or 'src' not in tdev:
continue
dev['src'] += tdev['src']
del list[devname]
def usurpTouchingThread(self, name, dev):
# the caller test takes priority over this thread, transfer it to the caller
for phase in self.phases:
list = self.dmesg[phase]['list']
if name in list:
tdev = list[name]
if tdev['start'] - dev['end'] < 0.1:
dev['end'] = tdev['end']
if 'src' not in dev:
dev['src'] = []
if 'src' in tdev:
dev['src'] += tdev['src']
del list[name]
break
def stitchTouchingThreads(self, testlist):
# merge any threads between tests that touch
for phase in self.phases:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
continue
for data in testlist:
data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
# merge any src call loops to reduce timeline size
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
if 'src' not in list[dev]:
continue
src = list[dev]['src']
p = 0
for e in sorted(src, key=lambda event: event.time):
if not p or not e.repeat(p):
p = e
continue
# e is another iteration of p, move it into p
p.end = e.end
p.length = p.end - p.time
p.count += 1
src.remove(e)
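# Function: trimTimeVal
# Description:
#	 Shift a single timestamp to remove a gap of length dT at t0. With
#	 left=True, times after t0 move earlier by dT (clamped to t0) and
#	 times at or before t0 are unchanged; with left=False, times at or
#	 before t0 move later by dT, times inside (t0, t0+dT) clamp to
#	 t0+dT, and later times are unchanged. Illustrative values:
#	 trimTimeVal(5.0, 2.0, 1.0, True) returns 4.0, and
#	 trimTimeVal(2.5, 2.0, 1.0, True) clamps to 2.0.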
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.phases:
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('src' in d):
for e in d['src']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
def normalizeTime(self, tZero):
# trim out any standby or freeze clock time
if(self.tSuspended != self.tResumed):
if(self.tResumed > tZero):
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, True)
else:
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, False)
def getTimeValues(self):
sktime = (self.dmesg['suspend_machine']['end'] - \
self.tKernSus) * 1000
rktime = (self.dmesg['resume_complete']['end'] - \
self.dmesg['resume_machine']['start']) * 1000
return (sktime, rktime)
def setPhase(self, phase, ktime, isbegin):
if(isbegin):
self.dmesg[phase]['start'] = ktime
else:
self.dmesg[phase]['end'] = ktime
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
if dev['length'] == 0:
continue
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
for p in self.phases:
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
vprint('%s (%s): callback did not return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.phases:
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
newgroup = []
for group in self.devicegroups:
for phase in phases:
if phase not in group:
continue
for p in group:
if p not in newgroup:
newgroup.append(p)
if group not in rmgroups:
rmgroups.append(group)
for group in rmgroups:
self.devicegroups.remove(group)
self.devicegroups.append(newgroup)
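# Function: newActionGlobal
# Description:
#	 Add a device/action without knowing its phase in advance: the
#	 target phase is the one with the largest time overlap, where the
#	 overlap of (start, end) with a phase (pstart, pend) is
#	 max(0, min(end, pend) - max(start, pstart)). e.g. an action
#	 spanning 1.0-3.0 against a phase spanning 2.0-5.0 overlaps by
#	 min(3,5) - max(1,2) = 1.0 (values illustrative).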
def newActionGlobal(self, name, start, end, pid=-1, color=''):
# which phase is this device callback or action in
targetphase = 'none'
htmlclass = ''
overlap = 0.0
phases = []
for phase in self.phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
# see if the action overlaps this phase
o = max(0, min(end, pend) - max(start, pstart))
if o > 0:
phases.append(phase)
# set the target phase to the one that overlaps most
if o > overlap:
if overlap > 0 and phase == 'post_resume':
continue
targetphase = phase
overlap = o
# if no target phase was found, pin it to the edge
if targetphase == 'none':
p0start = self.dmesg[self.phases[0]]['start']
if start <= p0start:
targetphase = self.phases[0]
else:
targetphase = self.phases[-1]
if pid == -2:
htmlclass = ' bg'
elif pid == -3:
htmlclass = ' ps'
if len(phases) > 1:
htmlclass = ' bg'
self.phaseOverlap(phases)
if targetphase in self.phases:
newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
return (targetphase, newname)
return False
def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
if pid == -2:
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
if htmlclass:
list[name]['htmlclass'] = htmlclass
if color:
list[name]['color'] = color
return name
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def printDetails(self):
vprint('Timeline Details:')
vprint(' test start: %f' % self.start)
vprint('kernel suspend start: %f' % self.tKernSus)
for phase in self.phases:
dc = len(self.dmesg[phase]['list'])
vprint(' %16s: %f - %f (%d devices)' % (phase, \
self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
vprint(' kernel resume end: %f' % self.tKernRes)
vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.phases:
list = self.deviceChildren(devname, phase)
for dev in list:
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.phases:
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if width != '0.000000' and length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
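# Function: addProcessUsageEvent
# Description:
#	 Build a timeline entry for one process from the process stat
#	 samples in self.pstl. The resulting cpuexec dict maps each
#	 (tlast, t) sample interval to the jiffy count used in it; the
#	 return value is the peak jiffy count seen for the process.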
def addProcessUsageEvent(self, name, times):
# get the start and end times for this process
maxC = 0
tlast = 0
start = -1
end = -1
for t in sorted(times):
if tlast == 0:
tlast = t
continue
if name in self.pstl[t]:
if start == -1 or tlast < start:
start = tlast
if end == -1 or t > end:
end = t
tlast = t
if start == -1 or end == -1:
return 0
# add a new action for this process and get the object
out = self.newActionGlobal(name, start, end, -3)
if not out:
return 0
phase, devname = out
dev = self.dmesg[phase]['list'][devname]
# get the cpu exec data
tlast = 0
clast = 0
cpuexec = dict()
for t in sorted(times):
if tlast == 0 or t <= start or t > end:
tlast = t
continue
list = self.pstl[t]
c = 0
if name in list:
c = list[name]
if c > maxC:
maxC = c
if c != clast:
key = (tlast, t)
cpuexec[key] = c
tlast = t
clast = c
dev['cpuexec'] = cpuexec
return maxC
def createProcessUsageEvents(self):
# get an array of process names
proclist = []
for t in self.pstl:
pslist = self.pstl[t]
for ps in pslist:
if ps not in proclist:
proclist.append(ps)
# get a list of data points for suspend and resume
tsus = []
tres = []
for t in sorted(self.pstl):
if t < self.tSuspended:
tsus.append(t)
else:
tres.append(t)
# process the events for suspend and resume
if len(proclist) > 0:
vprint('Process Execution:')
for ps in proclist:
c = self.addProcessUsageEvent(ps, tsus)
if c > 0:
vprint('%25s (sus): %d' % (ps, c))
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
vprint('%25s (res): %d' % (ps, c))
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
row = 0
count = 1
def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
self.name = name
self.args = args
self.caller = caller
self.ret = ret
self.time = start
self.length = end - start
self.end = end
self.ubiquitous = u
self.proc = proc
self.pid = pid
self.color = color
def title(self):
cnt = ''
if self.count > 1:
cnt = '(x%d)' % self.count
l = '%0.3fms' % (self.length * 1000)
if self.ubiquitous:
title = '%s(%s)%s <- %s, %s(%s)' % \
(self.name, self.args, cnt, self.caller, self.ret, l)
else:
title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l)
return title.replace('"', '')
def text(self):
if self.count > 1:
text = '%s(x%d)' % (self.name, self.count)
else:
text = self.name
return text
def repeat(self, tgt):
# is the tgt call just a repeat of this call (e.g. are we in a loop)
dt = self.time - tgt.end
# only combine calls if -all- attributes are identical
if tgt.caller == self.caller and \
tgt.name == self.name and tgt.args == self.args and \
tgt.proc == self.proc and tgt.pid == self.pid and \
tgt.ret == self.ret and dt >= 0 and \
dt <= sysvals.callloopmaxgap and \
self.length < sysvals.callloopmaxlen:
return True
return False
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
fkprobe = False
depth = 0
name = ''
type = ''
def __init__(self, t, m='', d=''):
self.time = float(t)
if not m and not d:
return
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
km = re.match('^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
km = re.match('^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
self.type = km.group('n')
return
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
else:
self.name = m
def getDepth(self, str):
return len(str)//2
def debugPrint(self, dev=''):
if(self.freturn and self.fcall):
print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
elif(self.freturn):
print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
else:
print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
def startMarker(self):
# Is this the starting line of a suspend?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name == 'SUSPEND START'):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
# Is this the ending line of a resume?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name == 'RESUME COMPLETE'):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('thaw_processes\[.*\] end', self.name)):
return True
return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
id = ''
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
pid = 0
name = ''
def __init__(self, pid):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
self.pid = pid
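# Function: addLine
# Description:
#	 Append one FTraceLine to this callgraph and keep the depth counter
#	 consistent. If the line's depth is below the current depth, virtual
#	 return lines (named 'missing_function_name') are inserted to close
#	 the gap; if it is above, virtual call lines are inserted. Returns
#	 True when the line completes the callgraph (a return at depth 0).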
def addLine(self, line, debug=False):
# if this is already invalid, just leave
if(self.invalid):
return False
# invalidate on too much data or bad depth
if(len(self.list) >= 1000000 or self.depth < 0):
self.invalidate(line)
return False
# compare current depth with this lines pre-call depth
prelinedep = line.depth
if(line.freturn and not line.fcall):
prelinedep += 1
last = 0
lasttime = line.time
virtualfname = 'missing_function_name'
if len(self.list) > 0:
last = self.list[-1]
lasttime = last.time
# handle low misalignments by inserting returns
if prelinedep < self.depth:
if debug and last:
print('-------- task %d --------' % self.pid)
last.debugPrint()
idx = 0
# add return calls to get the depth down
while prelinedep < self.depth:
if debug:
print('MISALIGN LOW (add returns): C%d - eC%d' % (self.depth, prelinedep))
self.depth -= 1
if idx == 0 and last and last.fcall and not last.freturn:
# special case, turn last call into a leaf
last.depth = self.depth
last.freturn = True
last.length = line.time - last.time
if debug:
last.debugPrint()
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = virtualfname
vline.freturn = True
self.list.append(vline)
if debug:
vline.debugPrint()
idx += 1
if debug:
line.debugPrint()
print('')
# handle high misalignments by inserting calls
elif prelinedep > self.depth:
if debug and last:
print('-------- task %d --------' % self.pid)
last.debugPrint()
idx = 0
# add calls to get the depth up
while prelinedep > self.depth:
if debug:
print('MISALIGN HIGH (add calls): C%d - eC%d' % (self.depth, prelinedep))
if idx == 0 and line.freturn and not line.fcall:
# special case, turn this return into a leaf
line.fcall = True
prelinedep -= 1
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = virtualfname
vline.fcall = True
if debug:
vline.debugPrint()
self.list.append(vline)
self.depth += 1
if not last:
self.start = vline.time
idx += 1
if debug:
line.debugPrint()
print('')
# process the call and set the new depth
if(line.fcall and not line.freturn):
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
if len(self.list) < 1:
self.start = line.time
self.list.append(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
if line.fcall:
self.end += line.length
if self.list[0].name == virtualfname:
self.invalid = True
return True
return False
def invalidate(self, line):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
id = 'task %s' % (self.pid)
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
vprint('Data misalignment for '+id+\
' (buffer overflow), ignoring this callback')
else:
vprint('Too much data for '+id+\
' '+window+', ignoring this callback')
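# Function: slice
# Description:
#	 Return a sub-callgraph containing only the lines between t0 and tN,
#	 re-based so the first retained call sits at depth 0. Used to attach
#	 just the relevant window of a large callgraph to a single device.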
def slice(self, t0, tN):
minicg = FTraceCallGraph(0)
count = -1
firstdepth = 0
for l in self.list:
if(l.time < t0 or l.time > tN):
continue
if(count < 0):
if(not l.fcall or l.name == 'dev_driver_string'):
continue
firstdepth = l.depth
count = 0
l.depth -= firstdepth
minicg.addLine(l)
if((count == 0 and l.freturn and l.fcall) or
(count > 0 and l.depth <= 0)):
break
count += 1
return minicg
def repair(self, enddepth):
# bring the depth back to 0 with additional returns
fixed = False
last = self.list[-1]
for i in reversed(range(enddepth)):
t = FTraceLine(last.time)
t.depth = i
t.freturn = True
fixed = self.addLine(t)
if fixed:
self.end = last.time
return True
return False
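# Function: postProcess
# Description:
#	 Walk the completed callgraph and pair each return with its call via
#	 a depth-indexed stack, computing call lengths from the time delta.
#	 A leaf's reported duration is also clipped to the gap before the
#	 next line, since ftrace durations can overrun. If the trace ended
#	 before the tree closed (cnt > 0), repair() closes the open calls.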
def postProcess(self, debug=False):
if len(self.list) > 0:
self.name = self.list[0].name
stack = dict()
cnt = 0
last = 0
for l in self.list:
# ftrace bug: reported duration is not reliable
# check each leaf and clip it at max possible length
if(last and last.freturn and last.fcall):
if last.length > l.time - last.time:
last.length = l.time - last.time
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(l.depth not in stack):
if debug:
print('Post Process Error: Depth missing')
l.debugPrint()
return False
# calculate call length from call/return lines
stack[l.depth].length = l.time - stack[l.depth].time
stack.pop(l.depth)
l.length = 0
cnt -= 1
last = l
if(cnt == 0):
# trace caught the whole call tree
return True
elif(cnt < 0):
if debug:
print('Post Process Error: Depth is less than 0')
return False
# trace ended before call tree finished
return self.repair(cnt)
def deviceMatch(self, pid, data):
found = False
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
if(self.name in borderphase):
p = borderphase[self.name]
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self.slice(dev['start'], dev['end'])
found = True
return found
for p in data.phases:
if(data.dmesg[p]['start'] <= self.start and
self.start <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self
found = True
break
break
return found
def newActionFromFunction(self, data):
name = self.name
if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
return
fs = self.start
fe = self.end
if fs < data.start or fe > data.end:
return
phase = ''
for p in data.phases:
if(data.dmesg[p]['start'] <= self.start and
self.start < data.dmesg[p]['end']):
phase = p
break
if not phase:
return
out = data.newActionGlobal(name, fs, fe, -2)
if out:
phase, myname = out
data.dmesg[phase]['list'][myname]['ftrace'] = self
def debugPrint(self):
print('[%f - %f] %s (%d)' % (self.start, self.end, self.name, self.pid))
for l in self.list:
if(l.freturn and l.fcall):
print('%f (%02d): %s(); (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
print('%f (%02d): %s} (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
print('%f (%02d): %s() { (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
print(' ')
class DevItem:
def __init__(self, test, phase, dev):
self.test = test
self.phase = phase
self.dev = dev
def isa(self, cls):
if 'htmlclass' in self.dev and cls in self.dev['htmlclass']:
return True
return False
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
html = ''
height = 0 # total timeline height
scaleH = 20 # timescale (top) row height
rowH = 30 # device row height
bodyH = 0 # body height
rows = 0 # total timeline rows
rowlines = dict()
rowheight = dict()
html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}"> {2}</div>\n'
def __init__(self, rowheight, scaleheight):
self.rowH = rowheight
self.scaleH = scaleheight
self.html = ''
def createHeader(self, sv):
if(not sv.stamp['time']):
return
self.html += '<div class="version"><a href="https://01.org/suspendresume">%s v%s</a></div>' \
% (sv.title, sv.version)
if sv.logmsg and sv.testlog:
self.html += '<button id="showtest" class="logbtn btnfmt">log</button>'
if sv.dmesglog:
self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>'
if sv.ftracelog:
self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>'
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
self.html += headline_stamp.format(sv.stamp['host'], sv.stamp['kernel'],
sv.stamp['mode'], sv.stamp['time'])
if 'man' in sv.stamp and 'plat' in sv.stamp and 'cpu' in sv.stamp:
headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>\n'
self.html += headline_sysinfo.format(sv.stamp['man'],
sv.stamp['plat'], sv.stamp['cpu'])
# Function: getDeviceRows
# Description:
# determine how many rows the device funcs will take
# Arguments:
# rawlist: the list of devices/actions for a single phase
# Output:
# The total number of rows needed to display this phase of the timeline
def getDeviceRows(self, rawlist):
# clear all rows and set them to undefined
sortdict = dict()
for item in rawlist:
item.row = -1
sortdict[item] = item.length
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
remaining = len(sortlist)
rowdata = dict()
row = 1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for i in sortlist:
if(i.row >= 0):
continue
s = i.time
e = i.time + i.length
valid = True
for ritem in rowdata[row]:
rs = ritem.time
re = ritem.time + ritem.length
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(i)
i.row = row
remaining -= 1
row += 1
return row
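# Illustrative packing example (made-up ranges): given src entries
# A[0.0-0.5], B[0.2-0.4] and C[0.6-0.9], getDeviceRows places A and C
# on the first row (they do not overlap) and spills B to a second row.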
# Function: getPhaseRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# devlist: the list of devices/actions in a group of contiguous phases
# Output:
# The total number of rows needed to display this phase of the timeline
def getPhaseRows(self, devlist, row=0, sortby='length'):
# clear all rows and set them to undefined
remaining = len(devlist)
rowdata = dict()
sortdict = dict()
myphases = []
# initialize all device rows to -1 and calculate devrows
for item in devlist:
dev = item.dev
tp = (item.test, item.phase)
if tp not in myphases:
myphases.append(tp)
dev['row'] = -1
if sortby == 'start':
# sort by start 1st, then length 2nd
sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start']))
else:
# sort by length 1st, then name 2nd
sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
if 'src' in dev:
dev['devrows'] = self.getDeviceRows(dev['src'])
# sort the devlist by length so that large items graph on top
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
orderedlist = []
for item in sortlist:
if item.dev['pid'] == -2:
orderedlist.append(item)
for item in sortlist:
if item not in orderedlist:
orderedlist.append(item)
# try to pack each row with as many devices as possible
while(remaining > 0):
rowheight = 1
if(row not in rowdata):
rowdata[row] = []
for item in orderedlist:
dev = item.dev
if(dev['row'] < 0):
s = dev['start']
e = dev['end']
valid = True
for ritem in rowdata[row]:
rs = ritem.dev['start']
re = ritem.dev['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(item)
dev['row'] = row
remaining -= 1
if 'devrows' in dev and dev['devrows'] > rowheight:
rowheight = dev['devrows']
for t, p in myphases:
if t not in self.rowlines or t not in self.rowheight:
self.rowlines[t] = dict()
self.rowheight[t] = dict()
if p not in self.rowlines[t] or p not in self.rowheight[t]:
self.rowlines[t][p] = dict()
self.rowheight[t][p] = dict()
rh = self.rowH
# section headers should use a different row height
if len(rowdata[row]) == 1 and \
'htmlclass' in rowdata[row][0].dev and \
'sec' in rowdata[row][0].dev['htmlclass']:
rh = 15
self.rowlines[t][p][row] = rowheight
self.rowheight[t][p][row] = rowheight * rh
row += 1
if(row > self.rows):
self.rows = int(row)
return row
def phaseRowHeight(self, test, phase, row):
return self.rowheight[test][phase][row]
def phaseRowTop(self, test, phase, row):
top = 0
for i in sorted(self.rowheight[test][phase]):
if i >= row:
break
top += self.rowheight[test][phase][i]
return top
def calcTotalRows(self):
# Calculate the heights and offsets for the header and rows
maxrows = 0
standardphases = []
for t in self.rowlines:
for p in self.rowlines[t]:
total = 0
for i in sorted(self.rowlines[t][p]):
total += self.rowlines[t][p][i]
if total > maxrows:
maxrows = total
if total == len(self.rowlines[t][p]):
standardphases.append((t, p))
self.height = self.scaleH + (maxrows*self.rowH)
self.bodyH = self.height - self.scaleH
# if there is 1 line per row, draw them the standard way
for t, p in standardphases:
for i in sorted(self.rowheight[t][p]):
self.rowheight[t][p][i] = self.bodyH/len(self.rowlines[t][p])
def createZoomBox(self, mode='command', testcount=1):
# Create bounding box, add buttons
html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
if mode != 'command':
if testcount > 1:
self.html += html_devlist2
self.html += html_devlist1.format('1')
else:
self.html += html_devlist1.format('')
self.html += html_zoombox
self.html += html_timeline.format('dmesg', self.height)
# Function: createTimeScale
# Description:
# Create the timescale for a timeline block
# Arguments:
# m0: start time (mode begin)
# mMax: end time (mode end)
# tTotal: total timeline time
# mode: suspend or resume
# Output:
# The html code needed to display the time scale
def createTimeScale(self, m0, mMax, tTotal, mode):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
output = '<div class="timescale">\n'
# set scale for timeline
mTotal = mMax - m0
tS = 0.1
if(tTotal <= 0):
return output+'</div>\n'
if(tTotal > 4):
tS = 1
divTotal = int(mTotal/tS) + 1
divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
for i in range(divTotal):
htmlline = ''
if(mode == 'suspend'):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
if(i == divTotal - 1):
val = mode
htmlline = timescale.format(pos, val)
else:
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
val = '%0.fms' % (float(i)*tS*1000)
htmlline = timescale.format(pos, val)
if(i == 0):
htmlline = rline.format(mode)
output += htmlline
self.html += output+'</div>\n'
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
class TestProps:
stamp = ''
sysinfo = ''
S0i3 = False
fwdata = []
stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
sysinfofmt = '^# sysinfo .*'
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
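# Illustrative nop-format line that ftrace_line_fmt_nop would match
# (proc-pid [cpu] flags timestamp: message):
#   <idle>-0     [001] d..2  1234.567890: machine_suspend[1] begin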
ftrace_line_fmt = ftrace_line_fmt_nop
cgformat = False
data = 0
ktemp = dict()
def __init__(self):
self.ktemp = dict()
def setTracerType(self, tracer):
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer)
def parseStamp(self, data, sv):
m = re.match(self.stampfmt, self.stamp)
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
if re.match(self.sysinfofmt, self.sysinfo):
for f in self.sysinfo.split('|'):
if '#' in f:
continue
tmp = f.strip().split(':', 1)
key = tmp[0]
val = tmp[1]
data.stamp[key] = val
sv.hostname = data.stamp['host']
sv.suspendmode = data.stamp['mode']
if sv.suspendmode == 'command' and sv.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem']
out = Popen(['grep', 'suspend_enter', sv.ftracefile],
stderr=PIPE, stdout=PIPE).stdout.read()
m = re.match('.* suspend_enter\[(?P<mode>.*)\]', out)
if m and m.group('mode') in ['1', '2', '3']:
sv.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sv.suspendmode
if not sv.stamp:
sv.stamp = data.stamp
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
ftemp = dict()
ttemp = dict()
data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
class ProcessMonitor:
proclist = dict()
running = False
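# Function: procstat
# Description:
#	 Sample /proc/<pid>/stat for every process and compute the jiffies
#	 each one consumed since the previous sample (data[13] is utime and
#	 data[14] is stime, i.e. fields 14 and 15 of the stat file). Returns
#	 a trace_marker string of the form 'ps - name-pid jiffies,...' for
#	 the processes that ran.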
def procstat(self):
c = ['cat /proc/[1-9]*/stat 2>/dev/null']
process = Popen(c, shell=True, stdout=PIPE)
running = dict()
for line in process.stdout:
data = line.split()
pid = data[0]
name = re.sub('[()]', '', data[1])
user = int(data[13])
kern = int(data[14])
kjiff = ujiff = 0
if pid not in self.proclist:
self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
else:
val = self.proclist[pid]
ujiff = user - val['user']
kjiff = kern - val['kern']
val['user'] = user
val['kern'] = kern
if ujiff > 0 or kjiff > 0:
running[pid] = ujiff + kjiff
process.wait()
out = ''
for pid in running:
jiffies = running[pid]
val = self.proclist[pid]
if out:
out += ','
out += '%s-%s %d' % (val['name'], pid, jiffies)
return 'ps - '+out
def processMonitor(self, tid):
while self.running:
out = self.procstat()
if out:
sysvals.fsetVal(out, 'trace_marker')
def start(self):
self.thread = Thread(target=self.processMonitor, args=(0,))
self.running = True
self.thread.start()
def stop(self):
self.running = False
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
sysvals.logmsg += msg+'\n'
if(sysvals.verbose):
print(msg)
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
# check for kprobes
sysvals.usekprobes = False
out = call('grep -q "_cal: (" '+sysvals.ftracefile, shell=True)
if(out == 0):
sysvals.usekprobes = True
# check for callgraph data on trace event blocks
out = call('grep -q "_cpu_down()" '+sysvals.ftracefile, shell=True)
if(out == 0):
sysvals.usekprobes = True
out = Popen(['head', '-1', sysvals.ftracefile],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
# figure out what level of trace events are supported
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
out = call('grep -q "'+e+': " '+sysvals.ftracefile, shell=True)
if(out != 0):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and out == 0):
sysvals.usetraceevents = True
# determine if this log is properly formatted
for e in ['SUSPEND START', 'RESUME COMPLETE']:
out = call('grep -q "'+e+'" '+sysvals.ftracefile, shell=True)
if(out != 0):
sysvals.usetracemarkers = False
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = 0
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
vprint('Analyzing the ftrace data...')
tp = TestProps()
tf = open(sysvals.ftracefile, 'r')
data = 0
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# grab the stamp and sysinfo
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
if(re.match(sysvals.devpropfmt, line)):
devProps(line)
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# look for the suspend start marker
if(t.startMarker()):
data = testrun[testidx].data
tp.parseStamp(data, sysvals)
data.setStart(t.time)
continue
if(not data):
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time)
testidx += 1
if(testidx >= testcnt):
break
continue
# trace event processing
if(t.fevent):
# general trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# special processing for trace events
if re.match('dpm_prepare\[.*', name):
continue
elif re.match('machine_suspend.*', name):
continue
elif re.match('suspend_enter\[.*', name):
if(not isbegin):
data.dmesg['suspend_prepare']['end'] = t.time
continue
elif re.match('dpm_suspend\[.*', name):
if(not isbegin):
data.dmesg['suspend']['end'] = t.time
continue
elif re.match('dpm_suspend_late\[.*', name):
if(isbegin):
data.dmesg['suspend_late']['start'] = t.time
else:
data.dmesg['suspend_late']['end'] = t.time
continue
elif re.match('dpm_suspend_noirq\[.*', name):
if(isbegin):
data.dmesg['suspend_noirq']['start'] = t.time
else:
data.dmesg['suspend_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_noirq\[.*', name):
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
data.dmesg['resume_noirq']['start'] = t.time
else:
data.dmesg['resume_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_early\[.*', name):
if(isbegin):
data.dmesg['resume_early']['start'] = t.time
else:
data.dmesg['resume_early']['end'] = t.time
continue
elif re.match('dpm_resume\[.*', name):
if(isbegin):
data.dmesg['resume']['start'] = t.time
else:
data.dmesg['resume']['end'] = t.time
continue
elif re.match('dpm_complete\[.*', name):
if(isbegin):
data.dmesg['resume_complete']['start'] = t.time
else:
data.dmesg['resume_complete']['end'] = t.time
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are simply graphed
if(isbegin):
# store each trace event in ttemp
if(name not in testrun[testidx].ttemp):
testrun[testidx].ttemp[name] = []
testrun[testidx].ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
# finish off matching trace event in ttemp
if(name in testrun[testidx].ttemp):
testrun[testidx].ttemp[name][-1]['end'] = t.time
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid))
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
if(cg.addLine(t)):
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid))
tf.close()
for test in testrun:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
test.data.newActionGlobal(name, event['begin'], event['end'])
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
test.data.printDetails()
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog():
vprint('Analyzing the ftrace data...')
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
sysvals.setupAllKprobes()
tracewatch = []
if sysvals.usekprobes:
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON', 'CPU_OFF']
# extract the callgraph and traceevent data
tp = TestProps()
testruns = []
testdata = []
testrun = 0
data = 0
tf = open(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# stamp and sysinfo lines
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
# firmware line: pull out any firmware data
m = re.match(sysvals.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
# tracer type line: determine the trace data type
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
if(re.match(sysvals.devpropfmt, line)):
devProps(line)
continue
# ignore all other commented lines
if line[0] == '#':
continue
# ftrace line: parse only valid lines
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_proc = m.group('proc')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# find the start of suspend
if(t.startMarker()):
phase = 'suspend_prepare'
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
tp.parseStamp(data, sysvals)
data.setStart(t.time)
data.tKernSus = t.time
continue
if(not data):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
m = re.match(sysvals.procexecfmt, t.name)
if(m):
proclist = dict()
for ps in m.group('ps').split(','):
val = ps.split()
if not val:
continue
name = val[0].replace('--', '-')
proclist[name] = int(val[1])
data.pstl[t.time] = proclist
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time)
if data.tKernRes == 0.0:
data.tKernRes = t.time
if data.dmesg['resume_complete']['end'] < 0:
data.dmesg['resume_complete']['end'] = t.time
if sysvals.suspendmode == 'mem' and len(tp.fwdata) > data.testnumber:
data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
if(data.tSuspended != 0 and data.tResumed != 0 and \
(data.fwSuspend > 0 or data.fwResume > 0)):
data.fwValid = True
if(not sysvals.usetracemarkers):
# no trace markers? then quit and be sure to finish recording
# the event we used to trigger resume end
if('thaw_processes' in testrun.ttemp and len(testrun.ttemp['thaw_processes']) > 0):
# if an entry exists, assume this is its end
testrun.ttemp['thaw_processes'][-1]['end'] = t.time
break
continue
# trace event processing
if(t.fevent):
if(phase == 'post_resume'):
data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
if(re.match('suspend_enter\[.*', t.name)):
if(isbegin):
data.dmesg[phase]['start'] = t.time
data.tKernSus = t.time
continue
# suspend_prepare start
elif(re.match('dpm_prepare\[.*', t.name)):
phase = 'suspend_prepare'
if(not isbegin):
data.dmesg[phase]['end'] = t.time
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = 'suspend'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = 'suspend_late'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = 'suspend_noirq'
data.setPhase(phase, t.time, isbegin)
if(not isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['start'] = t.time
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['end'] = t.time
data.tSuspended = t.time
else:
if(sysvals.suspendmode in ['mem', 'disk'] and not tp.S0i3):
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
phase = 'resume_machine'
data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
data.tLow = data.tResumed - data.tSuspended
continue
# acpi_suspend
elif(re.match('acpi_suspend\[.*', t.name)):
# acpi_suspend[0] S0i3
if(re.match('acpi_suspend\[0\] begin', t.name)):
if(sysvals.suspendmode == 'mem'):
tp.S0i3 = True
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = 'resume_noirq'
data.setPhase(phase, t.time, isbegin)
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = 'resume_early'
data.setPhase(phase, t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = 'resume'
data.setPhase(phase, t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = 'resume_complete'
if(isbegin):
data.dmesg[phase]['start'] = t.time
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time, 'pid': pid})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
elif(phase == 'post_resume'):
# post resume events can just have ends
testrun.ttemp[name].append({
'begin': data.dmesg[phase]['start'],
'end': t.time})
# device callback start
elif(t.type == 'device_pm_callback_start'):
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name);
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
if pid not in data.devpids:
data.devpids.append(pid)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
if(not m):
continue
n = m.group('d')
list = data.dmesg[phase]['list']
if(n in list):
dev = list[n]
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# kprobe event processing
elif(t.fkprobe):
kprobename = t.type
kprobedata = t.name
key = (kprobename, pid)
# displayname is generated from kprobe data
displayname = ''
if(t.fcall):
displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
if not displayname:
continue
if(key not in tp.ktemp):
tp.ktemp[key] = []
tp.ktemp[key].append({
'pid': pid,
'begin': t.time,
'end': t.time,
'name': displayname,
'cdata': kprobedata,
'proc': m_proc,
})
elif(t.freturn):
if(key not in tp.ktemp or len(tp.ktemp[key]) < 1):
continue
e = tp.ktemp[key][-1]
if e['begin'] < 0.0 or t.time - e['begin'] < 0.000001:
tp.ktemp[key].pop()
else:
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
if(kprobename == 'pm_notifier_call_chain' or \
kprobename == 'pm_restore_console'):
data.dmesg[phase]['end'] = t.time
data.tKernRes = t.time
# callgraph processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
key = (m_proc, pid)
if(key not in testrun.ftemp):
testrun.ftemp[key] = []
testrun.ftemp[key].append(FTraceCallGraph(pid))
# when the call is finished, see which device matches it
cg = testrun.ftemp[key][-1]
if(cg.addLine(t)):
testrun.ftemp[key].append(FTraceCallGraph(pid))
tf.close()
if sysvals.suspendmode == 'command':
for test in testruns:
for p in test.data.phases:
if p == 'suspend_prepare':
test.data.dmesg[p]['start'] = test.data.start
test.data.dmesg[p]['end'] = test.data.end
else:
test.data.dmesg[p]['start'] = test.data.end
test.data.dmesg[p]['end'] = test.data.end
test.data.tSuspended = test.data.end
test.data.tResumed = test.data.end
test.data.tLow = 0
test.data.fwValid = False
# dev source and procmon events can be unreadable with mixed phase height
if sysvals.usedevsrc or sysvals.useprocmon:
sysvals.mixedphaseheight = False
for i in range(len(testruns)):
test = testruns[i]
data = test.data
# find the total time range for this test (begin, end)
tlb, tle = data.start, data.end
if i < len(testruns) - 1:
tle = testruns[i+1].data.start
# add the process usage data to the timeline
if sysvals.useprocmon:
data.createProcessUsageEvents()
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
# add actual trace funcs
for name in test.ttemp:
for event in test.ttemp[name]:
data.newActionGlobal(name, event['begin'], event['end'], event['pid'])
# add the kprobe based virtual tracefuncs as actual devices
for key in tp.ktemp:
name, pid = key
if name not in sysvals.tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if kb == ke or tlb > kb or tle <= kb:
continue
color = sysvals.kprobeColor(name)
data.newActionGlobal(e['name'], kb, ke, pid, color)
# add config base kprobes and dev kprobes
if sysvals.usedevsrc:
for key in tp.ktemp:
name, pid = key
if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if kb == ke or tlb > kb or tle <= kb:
continue
data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
ke, e['cdata'], e['rdata'])
if sysvals.usecallgraph:
# add the callgraph data to the device hierarchy
sortlist = dict()
for key in test.ftemp:
proc, pid = key
for cg in test.ftemp[key]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
id = 'task %s' % (pid)
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
# match cg data to devices
if sysvals.suspendmode == 'command' or not cg.deviceMatch(pid, data):
sortkey = '%f%f%d' % (cg.start, cg.end, pid)
sortlist[sortkey] = cg
# create blocks for orphan cg data
for sortkey in sorted(sortlist):
cg = sortlist[sortkey]
name = cg.name
if sysvals.isCallgraphFunc(name):
vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
cg.newActionFromFunction(data)
if sysvals.suspendmode == 'command':
for data in testdata:
data.printDetails()
return testdata
# fill in any missing phases
for data in testdata:
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
vprint('WARNING: phase "%s" is missing!' % p)
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if sysvals.usedevsrc:
data.optimizeDevSrc()
data.printDetails()
# x2: merge any overlapping devices between test runs
if sysvals.usedevsrc and len(testdata) > 1:
tc = len(testdata)
for i in range(tc - 1):
devlist = testdata[i].overflowDevices()
for j in range(i + 1, tc):
testdata[j].mergeOverlapDevices(devlist)
testdata[0].stitchTouchingThreads(testdata[1:])
return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog(justtext=False):
vprint('Analyzing the dmesg data...')
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
if justtext:
dmesgtext = []
# there can be multiple test runs in a single file
tp = TestProps()
tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
testruns = []
data = 0
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
# grab the stamp and sysinfo
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
m = re.match(sysvals.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
if justtext:
dmesgtext.append(line)
continue
if(re.match('PM: Syncing filesystems.*', msg)):
if(data):
testruns.append(data)
data = Data(len(testruns))
tp.parseStamp(data, sysvals)
if len(tp.fwdata) > data.testnumber:
data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
if(not data):
continue
m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
if(m):
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
lf.close()
if justtext:
return dmesgtext
if data:
testruns.append(data)
if len(testruns) < 1:
doError(' dmesg log has no suspend/resume data: %s' \
% sysvals.dmesgfile)
# fix lines with same timestamp/function with the call and return swapped
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
phase = 'suspend_runtime'
if(data.fwValid):
vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': 'PM: Syncing filesystems.*',
'suspend': 'PM: Entering [a-z]* sleep.*',
'suspend_late': 'PM: suspend of devices complete after.*',
'suspend_noirq': 'PM: late suspend of devices complete after.*',
'suspend_machine': 'PM: noirq suspend of devices complete after.*',
'resume_machine': 'ACPI: Low-level resume complete.*',
'resume_noirq': 'ACPI: Waking up from system sleep state.*',
'resume_early': 'PM: noirq resume of devices complete after.*',
'resume': 'PM: early resume of devices complete after.*',
'resume_complete': 'PM: resume of devices complete after.*',
'post_resume': '.*Restarting tasks \.\.\..*',
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = 'PM: freeze of devices complete after.*'
dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
dm['resume'] = 'PM: early restore of devices complete after.*'
dm['resume_complete'] = 'PM: restore of devices complete after.*'
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = 'ACPI: resume from mwait'
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
			'emsg': 'PM: Preparing system for [a-z]* sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
		'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
		# suspend_prepare start
if(re.match(dm['suspend_prepare'], msg)):
phase = 'suspend_prepare'
data.dmesg[phase]['start'] = ktime
data.setStart(ktime)
data.tKernSus = ktime
# suspend start
elif(re.match(dm['suspend'], msg)):
data.dmesg['suspend_prepare']['end'] = ktime
phase = 'suspend'
data.dmesg[phase]['start'] = ktime
# suspend_late start
elif(re.match(dm['suspend_late'], msg)):
data.dmesg['suspend']['end'] = ktime
phase = 'suspend_late'
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg['suspend_late']['end'] = ktime
phase = 'suspend_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_machine start
elif(re.match(dm['suspend_machine'], msg)):
data.dmesg['suspend_noirq']['end'] = ktime
phase = 'suspend_machine'
data.dmesg[phase]['start'] = ktime
# resume_machine start
elif(re.match(dm['resume_machine'], msg)):
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
data.dmesg['suspend_machine']['end'] = prevktime
else:
data.tSuspended = ktime
data.dmesg['suspend_machine']['end'] = ktime
phase = 'resume_machine'
data.tResumed = ktime
data.tLow = data.tResumed - data.tSuspended
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg['resume_noirq']['end'] = ktime
phase = 'resume_early'
data.dmesg[phase]['start'] = ktime
# resume start
elif(re.match(dm['resume'], msg)):
data.dmesg['resume_early']['end'] = ktime
phase = 'resume'
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg['resume']['end'] = ktime
phase = 'resume_complete'
data.dmesg[phase]['start'] = ktime
# post resume start
elif(re.match(dm['post_resume'], msg)):
data.dmesg['resume_complete']['end'] = ktime
data.setEnd(ktime)
data.tKernRes = ktime
break
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg);
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg);
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in at:
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
if(a in actions):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
# fill in any missing phases
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing, something went wrong!' % p)
print(' In %s, this dmesg line denotes the start of %s:' % \
(sysvals.suspendmode, p))
print(' "%s"' % dm[p])
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
# fill in any actions we've found
for name in actions:
for event in actions[name]:
data.newActionGlobal(name, event['begin'], event['end'])
data.printDetails()
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
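# Function: _demoPhaseTable
# Description:
#	 Illustrative sketch only, never called by this tool: shows how the
#	 dmesg phase match table in parseKernelLog maps log messages to phase
#	 start times. The two messages below are hypothetical samples.
def _demoPhaseTable():
	dm = {
		'suspend_prepare': 'PM: Syncing filesystems.*',
		'suspend': 'PM: Entering [a-z]* sleep.*',
	}
	log = [
		(100.0, 'PM: Syncing filesystems ... done.'),
		(100.5, 'PM: Entering mem sleep'),
	]
	bounds = dict()
	for ktime, msg in log:
		for phase in dm:
			if re.match(dm[phase], msg):
				bounds[phase] = ktime
	print(bounds)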
def callgraphHTML(sv, hf, num, cg, title, color, devid):
html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
cgid = devid
if cg.id:
cgid += cg.id
cglen = (cg.end - cg.start) * 1000
if cglen < sv.mincglen:
return num
fmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
flen = fmt % (cglen, cg.start, cg.end)
hf.write(html_func_top.format(cgid, color, num, title, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
flen = fmt % (line.length*1000, line.time)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
return num
def addCallgraphs(sv, hf, data):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
num = 0
for p in data.phases:
if sv.cgphase and p != sv.cgphase:
continue
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
if len(sv.devicefilter) > 0 and devname not in sv.devicefilter:
continue
dev = list[devname]
color = 'white'
if 'color' in data.dmesg[p]:
color = data.dmesg[p]['color']
if 'color' in dev:
color = dev['color']
name = devname
if(devname in sv.devprops):
name = sv.devprops[devname].altName(devname)
if sv.suspendmode in suspendmodename:
name += ' '+p
if('ftrace' in dev):
cg = dev['ftrace']
num = callgraphHTML(sv, hf, num, cg,
name, color, dev['id'])
if('ftraces' in dev):
for cg in dev['ftraces']:
num = callgraphHTML(sv, hf, num, cg,
name+' → '+cg.name, color, dev['id'])
hf.write('\n\n </section>\n')
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
#	 testruns: array of test run dicts (with mode, host, kernel, time,
#	 suspend, resume, and url entries), not Data objects
#	 htmlfile: output html file path
#	 folder: name shown in the group test header
def createHTMLSummarySimple(testruns, htmlfile, folder):
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>SleepGraph Summary</title>\n\
<style type=\'text/css\'>\n\
.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;}\n\
.summary {border:1px solid;}\n\
th {border: 1px solid black;background:#222;color:white;}\n\
td {font: 16px "Times New Roman";text-align: center;}\n\
tr.alt td {background:#ddd;}\n\
tr.avg td {background:#aaa;}\n\
</style>\n</head>\n<body>\n'
# group test header
html += '<div class="stamp">%s (%d tests)</div>\n' % (folder, len(testruns))
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdlink = '\t<td><a href="{0}">html</a></td>\n'
# table header
html += '<table class="summary">\n<tr>\n' + th.format('#') +\
th.format('Mode') + th.format('Host') + th.format('Kernel') +\
th.format('Test Time') + th.format('Suspend') + th.format('Resume') +\
th.format('Detail') + '</tr>\n'
# test data, 1 row per test
avg = '<tr class="avg"><td></td><td></td><td></td><td></td>'+\
'<td>Average of {0} {1} tests</td><td>{2}</td><td>{3}</td><td></td></tr>\n'
sTimeAvg = rTimeAvg = 0.0
mode = ''
num = 0
for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'])):
if mode != data['mode']:
# test average line
if(num > 0):
sTimeAvg /= (num - 1)
rTimeAvg /= (num - 1)
html += avg.format('%d' % (num - 1), mode,
'%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg)
sTimeAvg = rTimeAvg = 0.0
mode = data['mode']
num = 1
# alternate row color
if num % 2 == 1:
html += '<tr class="alt">\n'
else:
html += '<tr>\n'
html += td.format("%d" % num)
num += 1
# basic info
for item in ['mode', 'host', 'kernel', 'time']:
val = "unknown"
if(item in data):
val = data[item]
html += td.format(val)
# suspend time
sTime = float(data['suspend'])
sTimeAvg += sTime
html += td.format('%.3f ms' % sTime)
# resume time
rTime = float(data['resume'])
rTimeAvg += rTime
html += td.format('%.3f ms' % rTime)
# link to the output html
html += tdlink.format(data['url']) + '</tr>\n'
# last test average line
if(num > 0):
sTimeAvg /= (num - 1)
rTimeAvg /= (num - 1)
html += avg.format('%d' % (num - 1), mode,
'%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg)
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n</body>\n</html>\n')
hf.close()
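# Function: _demoModeAverage
# Description:
#	 Illustrative sketch only, never called by this tool: mirrors the
#	 averaging in createHTMLSummarySimple, where num starts at 1 for each
#	 mode group, so the sums are divided by (num - 1), the row count.
def _demoModeAverage():
	suspend_times = [1500.0, 1700.0, 1600.0]	# hypothetical ms values
	num = 1
	sTimeAvg = 0.0
	for t in suspend_times:
		sTimeAvg += t
		num += 1
	sTimeAvg /= (num - 1)
	print('Average of %d tests: %3.3f ms' % (num - 1, sTimeAvg))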
def ordinal(value):
	suffix = 'th'
	if value % 100 not in (11, 12, 13):
		if value % 10 == 1:
			suffix = 'st'
		elif value % 10 == 2:
			suffix = 'nd'
		elif value % 10 == 3:
			suffix = 'rd'
	return '%d%s' % (value, suffix)
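# Function: _demoOrdinal
# Description:
#	 Illustrative sketch only, never called by this tool: spot checks for
#	 ordinal(), including the 11th-13th cases special-cased above.
def _demoOrdinal():
	for v, s in [(1, '1st'), (2, '2nd'), (3, '3rd'), (4, '4th'),
		(11, '11th'), (12, '12th'), (13, '13th'), (21, '21st')]:
		assert ordinal(v) == s, ordinal(v)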
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns):
if len(testruns) < 1:
print('ERROR: Not enough test data to build a timeline')
		return False
kerror = False
for data in testruns:
if data.kerror:
kerror = True
data.normalizeTime(testruns[-1].tSuspended)
# html function templates
html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">ERROR→</div>\n'
html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal3 = '<table class="time1">\n<tr>'\
'<td class="green">Execution Time: <b>{0} ms</b></td>'\
'<td class="yellow">Command: <b>{1}</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green" title="time from kernel enter_state({5}) to firmware mode [kernel time only]">{4}Kernel Suspend: {0} ms</td>'\
'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
'<td class="yellow" title="time from firmware mode to return from kernel enter_state({5}) [kernel time only]">{4}Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
# html format variables
scaleH = 20
if kerror:
scaleH = 40
# device timeline
vprint('Creating Device Timeline...')
devtl = Timeline(30, scaleH)
# write the test title and general info header
devtl.createHeader(sysvals)
# Generate the header for this timeline
for data in testruns:
tTotal = data.end - data.start
sktime, rktime = data.getTimeValues()
if(tTotal == 0):
print('ERROR: No timeline data')
sys.exit()
if(data.tLow > 0):
low_time = '%.0f'%(data.tLow*1000)
if sysvals.suspendmode == 'command':
run_time = '%.0f'%((data.end-data.start)*1000)
if sysvals.testcommand:
testdesc = sysvals.testcommand
else:
testdesc = 'unknown'
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
thtml = html_timetotal3.format(run_time, testdesc)
devtl.html += thtml
elif data.fwValid:
suspend_time = '%.0f'%(sktime + (data.fwSuspend/1000000.0))
resume_time = '%.0f'%(rktime + (data.fwResume/1000000.0))
testdesc1 = 'Total'
testdesc2 = ''
stitle = 'time from kernel enter_state(%s) to low-power mode [kernel & firmware time]' % sysvals.suspendmode
rtitle = 'time from low-power mode to return from kernel enter_state(%s) [firmware & kernel time]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc1 = testdesc2 = ordinal(data.testnumber+1)
testdesc2 += ' '
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1, stitle, rtitle)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc1, stitle, rtitle)
devtl.html += thtml
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
devtl.html += html_timegroups.format('%.3f'%sktime, \
sftime, rftime, '%.3f'%rktime, testdesc2, sysvals.suspendmode)
else:
suspend_time = '%.3f' % sktime
resume_time = '%.3f' % rktime
testdesc = 'Kernel'
stitle = 'time from kernel enter_state(%s) to firmware mode [kernel time only]' % sysvals.suspendmode
rtitle = 'time from firmware mode to return from kernel enter_state(%s) [kernel time only]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc, stitle, rtitle)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc, stitle, rtitle)
devtl.html += thtml
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
fulllist = []
threadlist = []
pscnt = 0
devcnt = 0
for data in testruns:
data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
for group in data.devicegroups:
devlist = []
for phase in group:
for devname in data.tdevlist[phase]:
d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
devlist.append(d)
if d.isa('kth'):
threadlist.append(d)
else:
if d.isa('ps'):
pscnt += 1
else:
devcnt += 1
fulllist.append(d)
if sysvals.mixedphaseheight:
devtl.getPhaseRows(devlist)
if not sysvals.mixedphaseheight:
if len(threadlist) > 0 and len(fulllist) > 0:
if pscnt > 0 and devcnt > 0:
msg = 'user processes & device pm callbacks'
elif pscnt > 0:
msg = 'user processes'
else:
msg = 'device pm callbacks'
d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
fulllist.insert(0, d)
devtl.getPhaseRows(fulllist)
if len(threadlist) > 0:
d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
threadlist.insert(0, d)
devtl.getPhaseRows(threadlist, devtl.rows)
devtl.calcTotalRows()
# draw the full timeline
devtl.createZoomBox(sysvals.suspendmode, len(testruns))
phases = {'suspend':[],'resume':[]}
for phase in data.dmesg:
if 'resume' in phase:
phases['resume'].append(phase)
else:
phases['suspend'].append(phase)
# draw each test run chronologically
for data in testruns:
# now draw the actual timeline blocks
for dir in phases:
# draw suspend and resume blocks separately
bname = '%s%d' % (dir[0], data.testnumber)
if dir == 'suspend':
m0 = data.start
mMax = data.tSuspended
left = '%f' % (((m0-t0)*100.0)/tTotal)
else:
m0 = data.tSuspended
mMax = data.end
# in an x2 run, remove any gap between blocks
if len(testruns) > 1 and data.testnumber == 0:
mMax = testruns[1].start
left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
mTotal = mMax - m0
# if a timeline block is 0 length, skip altogether
if mTotal == 0:
continue
width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
for b in sorted(phases[dir]):
# draw the phase color background
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
width = '%f' % ((length*100.0)/mTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
data.dmesg[b]['color'], '')
for e in data.errorinfo[dir]:
# draw red lines for any kernel errors found
t, err = e
right = '%f' % (((mMax-t)*100.0)/mTotal)
devtl.html += html_error.format(right, err)
for b in sorted(phases[dir]):
# draw the devices for this phase
phaselist = data.dmesg[b]['list']
for d in data.tdevlist[b]:
name = d
drv = ''
dev = phaselist[d]
xtraclass = ''
xtrainfo = ''
xtrastyle = ''
if 'htmlclass' in dev:
xtraclass = dev['htmlclass']
if 'color' in dev:
xtrastyle = 'background:%s;' % dev['color']
if(d in sysvals.devprops):
name = sysvals.devprops[d].altName(d)
xtraclass = sysvals.devprops[d].xtraClass()
xtrainfo = sysvals.devprops[d].xtraInfo()
elif xtraclass == ' kth':
xtrainfo = ' kernel_thread'
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
top = '%.3f' % (rowtop + devtl.scaleH)
left = '%f' % (((dev['start']-m0)*100)/mTotal)
width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
title = name+drv+xtrainfo+length
if sysvals.suspendmode == 'command':
title += sysvals.testcommand
elif xtraclass == ' ps':
if 'suspend' in b:
title += 'pre_suspend_process'
else:
title += 'post_resume_process'
else:
title += b
devtl.html += devtl.html_device.format(dev['id'], \
title, left, top, '%.3f'%rowheight, width, \
d+drv, xtraclass, xtrastyle)
if('cpuexec' in dev):
for t in sorted(dev['cpuexec']):
start, end = t
j = float(dev['cpuexec'][t]) / 5
if j > 1.0:
j = 1.0
height = '%.3f' % (rowheight/3)
top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
left = '%f' % (((start-m0)*100)/mTotal)
width = '%f' % ((end-start)*100/mTotal)
color = 'rgba(255, 0, 0, %f)' % j
devtl.html += \
html_cpuexec.format(left, top, height, width, color)
if('src' not in dev):
continue
# draw any trace events for this device
for e in dev['src']:
height = '%.3f' % devtl.rowH
top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
left = '%f' % (((e.time-m0)*100)/mTotal)
width = '%f' % (e.length*100/mTotal)
xtrastyle = ''
if e.color:
xtrastyle = 'background:%s;' % e.color
devtl.html += \
html_traceevent.format(e.title(), \
left, top, height, width, e.text(), '', xtrastyle)
# draw the time scale, try to make the number of labels readable
devtl.createTimeScale(m0, mMax, tTotal, dir)
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
if sysvals.suspendmode != 'command':
data = testruns[-1]
devtl.html += '<div class="legend">\n'
pdelta = 100.0/len(data.phases)
pmargin = pdelta / 4.0
for phase in data.phases:
tmp = phase.split('_')
id = tmp[0][0]
if(len(tmp) > 1):
id += tmp[1][0]
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
			name = phase.replace('_', ' ')
devtl.html += devtl.html_legend.format(order, \
data.dmesg[phase]['color'], name, id)
devtl.html += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
	# no header or CSS if it's embedded
if(sysvals.embedded):
hf.write('pass True tSus %.3f tRes %.3f tLow %.3f fwvalid %s tSus %.3f tRes %.3f\n' %
(data.tSuspended-data.start, data.end-data.tSuspended, data.tLow, data.fwValid, \
data.fwSuspend/1000000, data.fwResume/1000000))
else:
addCSS(hf, sysvals, len(testruns), kerror)
# write the device timeline
hf.write(devtl.html)
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
pscolor = 'linear-gradient(to top left, #ccc, #eee)'
hf.write(devtl.html_phaselet.format('pre_suspend_process', \
'0', '0', pscolor))
for b in data.phases:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(devtl.html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write(devtl.html_phaselet.format('post_resume_process', \
'0', '0', pscolor))
if sysvals.suspendmode == 'command':
hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
data = testruns[sysvals.cgtest]
else:
data = testruns[-1]
if(sysvals.usecallgraph and not sysvals.embedded):
addCallgraphs(sysvals, hf, data)
# add the test log as a hidden div
if sysvals.testlog and sysvals.logmsg:
hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
# add the dmesg log as a hidden div
if sysvals.dmesglog and sysvals.dmesgfile:
hf.write('<div id="dmesglog" style="display:none;">\n')
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('<', '<').replace('>', '>')
hf.write(line)
lf.close()
hf.write('</div>\n')
# add the ftrace log as a hidden div
if sysvals.ftracelog and sysvals.ftracefile:
hf.write('<div id="ftracelog" style="display:none;">\n')
lf = open(sysvals.ftracefile, 'r')
for line in lf:
hf.write(line)
lf.close()
hf.write('</div>\n')
if(not sysvals.embedded):
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
else:
		# embedded output will be loaded in a page, skip the js
t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
# add js code in a div entry for later evaluation
detail = 'var bounds = [%f,%f];\n' % (t0, tMax)
detail += 'var devtable = [\n'
for data in testruns:
topo = data.deviceTopology()
detail += '\t"%s",\n' % (topo)
detail += '];\n'
hf.write('<div id=customcode style=display:none>\n'+detail+'</div>\n')
hf.close()
return True
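# Function: _demoTimelinePercent
# Description:
#	 Illustrative sketch only, never called by this tool: the left/width
#	 math used by createHTML above, mapping a device's time interval onto
#	 CSS percentages of its enclosing timeline block. The numbers are
#	 hypothetical.
def _demoTimelinePercent():
	m0, mMax = 100.0, 102.0		# block bounds in seconds
	start, end = 100.5, 101.0	# device interval in seconds
	mTotal = mMax - m0
	left = '%f' % (((start-m0)*100)/mTotal)
	width = '%f' % (((end-start)*100)/mTotal)
	print('left:%s%% width:%s%%' % (left, width))	# both 25.000000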
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
kernel = sv.stamp['kernel']
host = sv.hostname[0].upper()+sv.hostname[1:]
mode = sv.suspendmode
if sv.suspendmode in suspendmodename:
mode = suspendmodename[sv.suspendmode]
title = host+' '+mode+' '+kernel
# various format changes by flags
cgchk = 'checked'
cgnchk = 'not(:checked)'
if sv.cgexp:
cgchk = 'not(:checked)'
cgnchk = 'checked'
hoverZ = 'z-index:8;'
if sv.usedevsrc:
hoverZ = ''
devlistpos = 'absolute'
if testcount > 1:
devlistpos = 'relative'
scaleTH = 20
if kerror:
scaleTH = 60
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y:scroll;}\n\
.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
.stamp.sysinfo {font:10px Arial;}\n\
.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
.callgraph article * {padding-left:28px;}\n\
h1 {color:black;font:bold 30px Times;}\n\
t0 {color:black;font:bold 30px Times;}\n\
t1 {color:black;font:30px Times;}\n\
t2 {color:black;font:25px Times;}\n\
t3 {color:black;font:20px Times;white-space:nowrap;}\n\
t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
cS {font:bold 13px Times;}\n\
table {width:100%;}\n\
.gray {background:rgba(80,80,80,0.1);}\n\
.green {background:rgba(204,255,204,0.4);}\n\
.purple {background:rgba(128,0,128,0.2);}\n\
.yellow {background:rgba(255,255,204,0.4);}\n\
.blue {background:rgba(169,208,245,0.4);}\n\
.time1 {font:22px Arial;border:1px solid;}\n\
.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align:center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color:red;}\n\
.hide {display:none;}\n\
.pf {display:none;}\n\
.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.hover.sync {background:white;}\n\
.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.btnfmt {position:relative;float:right;height:25px;width:auto;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
.devlist {position:'+devlistpos+';width:190px;}\n\
a:link {color:white;text-decoration:none;}\n\
a:visited {color:white;}\n\
a:hover {color:white;}\n\
a:active {color:white;}\n\
.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
.tblock {position:absolute;height:100%;background:#ddd;}\n\
.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
.bg {z-index:1;}\n\
'+extra+'\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' var resolution = -1;\n'\
' var dragval = [0, 0];\n'\
' function redrawTimescale(t0, tMax, tS) {\n'\
' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
' var tTotal = tMax - t0;\n'\
' var list = document.getElementsByClassName("tblock");\n'\
' for (var i = 0; i < list.length; i++) {\n'\
' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
' var mMax = m0 + mTotal;\n'\
' var html = "";\n'\
' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
' if(divTotal > 1000) continue;\n'\
' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
' var pos = 0.0, val = 0.0;\n'\
' for (var j = 0; j < divTotal; j++) {\n'\
' var htmlline = "";\n'\
' var mode = list[i].id[5];\n'\
' if(mode == "s") {\n'\
' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
' val = (j-divTotal+1)*tS;\n'\
' if(j == divTotal - 1)\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
' else\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' } else {\n'\
' pos = 100 - (((j)*tS*100)/mTotal);\n'\
' val = (j)*tS;\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' if(j == 0)\n'\
' if(mode == "r")\n'\
' htmlline = rline+"<cS>←R</cS></div>";\n'\
' else\n'\
	'						htmlline = rline+"<cS>0ms</cS></div>";\n'\
' }\n'\
' html += htmlline;\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' }\n'\
' function zoomTimeline() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var left = zoombox.scrollLeft;\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 910034) newval = 910034;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' var idx = 7*window.innerWidth/1100;\n'\
' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
' if(i >= tS.length) i = tS.length - 1;\n'\
' if(tS[i] == resolution) return;\n'\
' resolution = tS[i];\n'\
' redrawTimescale(t0, tMax, tS[i]);\n'\
' }\n'\
' function deviceName(title) {\n'\
' var name = title.slice(0, title.indexOf(" ("));\n'\
' return name;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = deviceName(this.title);\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "hover "+cname;\n'\
' } else {\n'\
' dev[i].className = cname;\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = deviceName(title);\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = deviceName(this.title);\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' if(document.getElementById("devicedetail1"))\n'\
' pdata = [[], []];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' if(typeof devstats !== \'undefined\')\n'\
' callDetail(this.id, this.title);\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' if(cg.length < 10) return;\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' cgid = cg[i].id.split("x")[0]\n'\
' if(idlist.indexOf(cgid) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function callDetail(devid, devtitle) {\n'\
' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
' return;\n'\
' var list = devstats[devid];\n'\
' var tmp = devtitle.split(" ");\n'\
' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
' var dd = document.getElementById(phase);\n'\
' var total = parseFloat(tmp[1].slice(1));\n'\
' var mlist = [];\n'\
' var maxlen = 0;\n'\
' var info = []\n'\
' for(var i in list) {\n'\
' if(list[i][0] == "@") {\n'\
' info = list[i].split("|");\n'\
' continue;\n'\
' }\n'\
' var tmp = list[i].split("|");\n'\
' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
' var p = (t*100.0/total).toFixed(2);\n'\
' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
' if(f.length > maxlen)\n'\
' maxlen = f.length;\n'\
' }\n'\
' var pad = 5;\n'\
' if(mlist.length == 0) pad = 30;\n'\
' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
' if(info.length > 2)\n'\
' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
' if(info.length > 3)\n'\
' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
' if(info.length > 4)\n'\
' html += ", return=<b>"+info[4]+"</b>";\n'\
' html += "</t3></div>";\n'\
' if(mlist.length > 0) {\n'\
' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
' for(var i in mlist)\n'\
' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
' html += "</tr><tr><th>Calls</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][1]+"</td>";\n'\
' html += "</tr><tr><th>Time(ms)</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][2]+"</td>";\n'\
' html += "</tr><tr><th>Percent</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][3]+"</td>";\n'\
' html += "</tr></table>";\n'\
' }\n'\
' dd.innerHTML = html;\n'\
' var height = (maxlen*5)+100;\n'\
' dd.style.height = height+"px";\n'\
' document.getElementById("devicedetail").style.height = height+"px";\n'\
' }\n'\
' function callSelect() {\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(this.id == cg[i].id) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var win = window.open();\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' function errWindow() {\n'\
' var text = this.id;\n'\
' var win = window.open();\n'\
' win.document.write("<pre>"+text+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function logWindow(e) {\n'\
' var name = e.target.id.slice(4);\n'\
' var win = window.open();\n'\
' var log = document.getElementById(name+"log");\n'\
' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function onMouseDown(e) {\n'\
' dragval[0] = e.clientX;\n'\
' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
' document.onmousemove = onMouseMove;\n'\
' }\n'\
' function onMouseMove(e) {\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
' }\n'\
' function onMouseUp(e) {\n'\
' document.onmousemove = null;\n'\
' }\n'\
' function onKeyPress(e) {\n'\
' var c = e.charCode;\n'\
' if(c != 42 && c != 43 && c != 45) return;\n'\
' var click = document.createEvent("Events");\n'\
' click.initEvent("click", true, false);\n'\
' if(c == 43) \n'\
' document.getElementById("zoomin").dispatchEvent(click);\n'\
' else if(c == 45)\n'\
' document.getElementById("zoomout").dispatchEvent(click);\n'\
' else if(c == 42)\n'\
' document.getElementById("zoomdef").dispatchEvent(click);\n'\
' }\n'\
' window.addEventListener("resize", function () {zoomTimeline();});\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' dmesg.onmousedown = onMouseDown;\n'\
' document.onmouseup = onMouseUp;\n'\
' document.onkeypress = onKeyPress;\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var list = document.getElementsByClassName("err");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = errWindow;\n'\
' var list = document.getElementsByClassName("logbtn");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = logWindow;\n'\
' list = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' var dev = dmesg.getElementsByClassName("srccall");\n'\
' for (var i = 0; i < dev.length; i++)\n'\
' dev[i].onclick = callSelect;\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
	hf.write(script_code)
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
pm = ProcessMonitor()
tp = sysvals.tpath
fwdata = []
# mark the start point in the kernel ring buffer just as we start
sysvals.initdmesg()
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('START TRACING')
sysvals.fsetVal('1', 'tracing_on')
if sysvals.useprocmon:
pm.start()
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# x2delay in between test runs
if(count > 1 and sysvals.x2delay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.x2delay, 'trace_marker')
time.sleep(sysvals.x2delay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# start message
if sysvals.testcommand != '':
print('COMMAND START')
else:
if(sysvals.rtcwake):
print('SUSPEND START')
else:
print('SUSPEND START (press a key to resume)')
# set rtcwake
if(sysvals.rtcwake):
print('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarmOn()
# start of suspend trace marker
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('SUSPEND START', 'trace_marker')
		# pre-suspend delay
if(count == 1 and sysvals.predelay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.predelay, 'trace_marker')
time.sleep(sysvals.predelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# initiate suspend or command
if sysvals.testcommand != '':
call(sysvals.testcommand+' 2>&1', shell=True);
else:
mode = sysvals.suspendmode
if sysvals.memmode and os.path.exists(sysvals.mempowerfile):
mode = 'mem'
pf = open(sysvals.mempowerfile, 'w')
pf.write(sysvals.memmode)
pf.close()
pf = open(sysvals.powerfile, 'w')
pf.write(mode)
# execution will pause here
try:
pf.close()
except:
pass
if(sysvals.rtcwake):
sysvals.rtcWakeAlarmOff()
		# post-resume delay
if(count == sysvals.execcount and sysvals.postdelay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.postdelay, 'trace_marker')
time.sleep(sysvals.postdelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# return from suspend
print('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('RESUME COMPLETE', 'trace_marker')
if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
fwdata.append(getFPDT(False))
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
if sysvals.useprocmon:
pm.stop()
sysvals.fsetVal('0', 'tracing_on')
print('CAPTURING TRACE')
sysvals.writeDatafileHeader(sysvals.ftracefile, fwdata)
call('cat '+tp+'trace >> '+sysvals.ftracefile, shell=True)
sysvals.fsetVal('', 'trace')
devProps()
# grab a copy of the dmesg output
print('CAPTURING DMESG')
sysvals.writeDatafileHeader(sysvals.dmesgfile, fwdata)
sysvals.getdmesg()
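# Function: _demoWaitMarkers
# Description:
#	 Illustrative sketch only, never called by this tool: the WAIT
#	 trace_marker pattern used by executeSuspend, which brackets an
#	 intentional delay so it can be recognized in the ftrace output
#	 later. _fakeSetVal is a hypothetical stand-in for sysvals.fsetVal.
def _demoWaitMarkers(delay=100):
	def _fakeSetVal(val, file):
		print('%s <- %s' % (file, val))
	_fakeSetVal('WAIT %d' % delay, 'trace_marker')
	time.sleep(delay/1000.0)
	_fakeSetVal('WAIT END', 'trace_marker')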
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
# This can be dangerous, so use it at your own risk; most devices are
# set to always-on since the kernel can't determine whether a device
# can properly autosuspend
def setUSBDevicesAuto():
sysvals.rootCheck(True)
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
call('echo auto > %s/power/control' % dirname, shell=True)
name = dirname.split('/')[-1]
desc = Popen(['cat', '%s/product' % dirname],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
ctrl = Popen(['cat', '%s/power/control' % dirname],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
yesvals = ['auto', 'enabled', 'active', '1']
novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
if val in yesvals:
return 'Y'
elif val in novals:
return 'N'
return ' '
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
#	 The time string, e.g. "1901m16s", or 'n/a' if the value is unparsable
def ms2nice(val):
	ms = 0
	try:
		ms = int(val)
	except:
		return 'n/a'
	m = ms // 60000
	s = (ms // 1000) - (m * 60)
	return '%3dm%2ds' % (m, s)
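# Function: _demoMs2nice
# Description:
#	 Illustrative sketch only, never called by this tool: the arithmetic
#	 behind ms2nice, e.g. 114076000 ms is 1901 minutes and 16 seconds.
def _demoMs2nice():
	ms = 114076000
	assert ms // 60000 == 1901
	assert (ms // 1000) - (1901 * 60) == 16
	assert ms2nice(ms) == '1901m16s'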
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
def detectUSB():
field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
'control':'', 'persist':'', 'runtime_enabled':'',
'runtime_status':'', 'runtime_usage':'',
'runtime_active_time':'',
'runtime_suspended_time':'',
'active_duration':'',
'connected_duration':''}
print('LEGEND')
print('---------------------------------------------------------------------------------------------')
print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)')
print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)')
print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)')
print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)')
print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)')
print(' U = runtime usage count')
print('---------------------------------------------------------------------------------------------')
print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT')
print('---------------------------------------------------------------------------------------------')
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
for i in field:
field[i] = Popen(['cat', '%s/%s' % (dirname, i)],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
name = dirname.split('/')[-1]
for i in power:
power[i] = Popen(['cat', '%s/power/%s' % (dirname, i)],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
if(re.match('usb[0-9]*', name)):
first = '%-8s' % name
else:
first = '%8s' % name
print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
(first, field['idVendor'], field['idProduct'], \
field['product'][0:20], field['speed'], \
yesno(power['async']), \
yesno(power['control']), \
yesno(power['persist']), \
yesno(power['runtime_enabled']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['autosuspend'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']), \
ms2nice(power['active_duration']), \
ms2nice(power['connected_duration'])))
# Function: devProps
# Description:
#	 Retrieve a list of properties for all devices in the trace log, or
#	 parse them from a previously written "Device Properties" stamp line
#	 when one is passed in via the data argument
def devProps(data=0):
props = dict()
if data:
idx = data.index(': ') + 2
if idx >= len(data):
return
devlist = data[idx:].split(';')
for dev in devlist:
f = dev.split(',')
if len(f) < 3:
continue
dev = f[0]
props[dev] = DevProps()
props[dev].altname = f[1]
			props[dev].async = bool(int(f[2]))
sysvals.devprops = props
if sysvals.suspendmode == 'command' and 'testcommandstring' in props:
sysvals.testcommand = props['testcommandstring'].altname
return
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
# first get the list of devices we need properties for
msghead = 'Additional data added by AnalyzeSuspend'
alreadystamped = False
tp = TestProps()
tf = open(sysvals.ftracefile, 'r')
for line in tf:
if msghead in line:
alreadystamped = True
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
		# parse only valid lines; if this is not one, move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
if(not m):
continue
dev = m.group('d')
if dev not in props:
props[dev] = DevProps()
tf.close()
if not alreadystamped and sysvals.suspendmode == 'command':
out = '#\n# '+msghead+'\n# Device Properties: '
out += 'testcommandstring,%s,0;' % (sysvals.testcommand)
with open(sysvals.ftracefile, 'a') as fp:
fp.write(out+'\n')
sysvals.devprops = props
return
# now get the syspath for each of our target devices
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
# now fill in the properties for our target devices
for dev in props:
dirname = props[dev].syspath
if not dirname or not os.path.exists(dirname):
continue
with open(dirname+'/power/async') as fp:
text = fp.read()
props[dev].async = False
if 'enabled' in text:
props[dev].async = True
fields = os.listdir(dirname)
if 'product' in fields:
with open(dirname+'/product') as fp:
props[dev].altname = fp.read()
elif 'name' in fields:
with open(dirname+'/name') as fp:
props[dev].altname = fp.read()
elif 'model' in fields:
with open(dirname+'/model') as fp:
props[dev].altname = fp.read()
elif 'description' in fields:
with open(dirname+'/description') as fp:
props[dev].altname = fp.read()
elif 'id' in fields:
with open(dirname+'/id') as fp:
props[dev].altname = fp.read()
elif 'idVendor' in fields and 'idProduct' in fields:
idv, idp = '', ''
with open(dirname+'/idVendor') as fp:
idv = fp.read().strip()
with open(dirname+'/idProduct') as fp:
idp = fp.read().strip()
props[dev].altname = '%s:%s' % (idv, idp)
if props[dev].altname:
out = props[dev].altname.strip().replace('\n', ' ')
out = out.replace(',', ' ')
out = out.replace(';', ' ')
props[dev].altname = out
# and now write the data to the ftrace file
if not alreadystamped:
out = '#\n# '+msghead+'\n# Device Properties: '
for dev in sorted(props):
out += props[dev].out(dev)
with open(sysvals.ftracefile, 'a') as fp:
fp.write(out+'\n')
sysvals.devprops = props
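# Function: _demoDevPropStamp
# Description:
#	 Illustrative sketch only, never called by this tool: parses a sample
#	 "Device Properties" stamp of the form devProps writes above, where
#	 each entry is "name,altname,async" separated by semicolons. The two
#	 devices are hypothetical.
def _demoDevPropStamp():
	data = '# Device Properties: usb1,xHCI Host Controller,1;eth0,e1000e,0;'
	idx = data.index(': ') + 2
	for dev in data[idx:].split(';'):
		f = dev.split(',')
		if len(f) < 3:
			continue
		print('%s altname=%s async=%s' % (f[0], f[1], bool(int(f[2]))))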
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
modes = []
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
		modes = fp.read().split()
fp.close()
if(os.path.exists(sysvals.mempowerfile)):
deep = False
fp = open(sysvals.mempowerfile, 'r')
		for m in fp.read().split():
memmode = m.strip('[]')
if memmode == 'deep':
deep = True
else:
modes.append('mem-%s' % memmode)
fp.close()
if 'mem' in modes and not deep:
modes.remove('mem')
return modes
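# Function: _demoMemSleepParse
# Description:
#	 Illustrative sketch only, never called by this tool: the bracket
#	 convention parsed by getModes above, where the kernel marks the
#	 currently selected mem_sleep mode as "[deep]" among the choices.
def _demoMemSleepParse():
	text = 's2idle [deep]'	# hypothetical mem_sleep contents
	modes = []
	for m in text.split():
		modes.append(m.strip('[]'))
	print(modes)	# ['s2idle', 'deep']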
# Function: dmidecode
# Description:
# Read the bios tables and pull out system info
# Arguments:
# mempath: /dev/mem or custom mem path
# fatal: True to exit on error, False to return empty dict
# Output:
# A dict object with all available key/values
def dmidecode(mempath, fatal=False):
out = dict()
# the list of values to retrieve, with hardcoded (type, idx)
info = {
'bios-vendor': (0, 4),
'bios-version': (0, 5),
'bios-release-date': (0, 8),
'system-manufacturer': (1, 4),
'system-product-name': (1, 5),
'system-version': (1, 6),
'system-serial-number': (1, 7),
'baseboard-manufacturer': (2, 4),
'baseboard-product-name': (2, 5),
'baseboard-version': (2, 6),
'baseboard-serial-number': (2, 7),
'chassis-manufacturer': (3, 4),
'chassis-type': (3, 5),
'chassis-version': (3, 6),
'chassis-serial-number': (3, 7),
'processor-manufacturer': (4, 7),
'processor-version': (4, 16),
}
if(not os.path.exists(mempath)):
if(fatal):
doError('file does not exist: %s' % mempath)
return out
if(not os.access(mempath, os.R_OK)):
if(fatal):
doError('file is not readable: %s' % mempath)
return out
# by default use legacy scan, but try to use EFI first
memaddr = 0xf0000
memsize = 0x10000
for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
if not os.path.exists(ep) or not os.access(ep, os.R_OK):
continue
fp = open(ep, 'r')
buf = fp.read()
fp.close()
i = buf.find('SMBIOS=')
if i >= 0:
try:
memaddr = int(buf[i+7:], 16)
memsize = 0x20
except:
continue
# read in the memory for scanning
fp = open(mempath, 'rb')
try:
fp.seek(memaddr)
buf = fp.read(memsize)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
return out
fp.close()
# search for either an SM table or DMI table
i = base = length = num = 0
while(i < memsize):
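		# the "_SM_" entry point stores the table length at offset 0x16 and
		# the address/record-count pair at 0x18; the legacy "_DMI_" anchor
		# stores them at offsets 6 and 8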
if buf[i:i+4] == '_SM_' and i < memsize - 16:
length = struct.unpack('H', buf[i+22:i+24])[0]
base, num = struct.unpack('IH', buf[i+24:i+30])
break
elif buf[i:i+5] == '_DMI_':
length = struct.unpack('H', buf[i+6:i+8])[0]
base, num = struct.unpack('IH', buf[i+8:i+14])
break
i += 16
if base == 0 and length == 0 and num == 0:
if(fatal):
doError('Neither SMBIOS nor DMI were found')
else:
return out
# read in the SM or DMI table
fp = open(mempath, 'rb')
try:
fp.seek(base)
buf = fp.read(length)
except:
if(fatal):
doError('DMI table is unreachable, sorry')
else:
return out
fp.close()
# scan the table for the values we want
count = i = 0
while(count < num and i <= len(buf) - 4):
type, size, handle = struct.unpack('BBH', buf[i:i+4])
n = i + size
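		# the formatted area is followed by NUL-terminated strings; scan
		# for the double-NUL that terminates the string-set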
while n < len(buf) - 1:
if 0 == struct.unpack('H', buf[n:n+2])[0]:
break
n += 1
data = buf[i+size:n+2].split('\0')
for name in info:
itype, idxadr = info[name]
if itype == type:
idx = struct.unpack('B', buf[i+idxadr])[0]
if idx > 0 and idx < len(data) - 1:
s = data[idx-1].strip()
if s and s.lower() != 'to be filled by o.e.m.':
out[name] = data[idx-1]
i = n + 2
count += 1
return out
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
sysvals.rootCheck(True)
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file does not exist: %s' % sysvals.fpdtpath)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.fpdtpath)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file does not exist: %s' % sysvals.mempath)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.mempath)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes')
return False
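	# standard 36-byte ACPI table header: Signature, Length, Revision,
	# Checksum, OEM ID, OEM Table ID, OEM Revision, Creator ID,
	# Creator Revision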
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
if(output):
print('')
print('Firmware Performance Data Table (%s)' % table[0])
print(' Signature : %s' % table[0])
print(' Table Length : %u' % table[1])
print(' Revision : %u' % table[2])
print(' Checksum : 0x%x' % table[3])
print(' OEM ID : %s' % table[4])
print(' OEM Table ID : %s' % table[5])
print(' OEM Revision : %u' % table[6])
print(' Creator ID : %s' % table[7])
print(' Creator Revision : 0x%x' % table[8])
print('')
if(table[0] != 'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
fp = open(sysvals.mempath, 'rb')
while(i < len(records)):
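		# each FPDT performance record begins with a 4-byte header:
		# type (u16), length (u8), revision (u8)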
header = struct.unpack('HBB', records[i:i+4])
if(header[0] not in rectype):
i += header[1]
continue
if(header[1] != 16):
i += header[1]
continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
if(output):
print('Bad address 0x%x in %s' % (addr, sysvals.mempath))
return [0, 0]
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
print(' Reset END : %u ns' % record[4])
print(' OS Loader LoadImage Start : %u ns' % record[5])
print(' OS Loader StartImage Start : %u ns' % record[6])
print(' ExitBootServices Entry : %u ns' % record[7])
print(' ExitBootServices Exit : %u ns' % record[8])
elif(rechead[0] == 'S3PT'):
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype):
					# unknown record type: break out rather than loop
					# forever, since j would never advance past it
					break
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
print(' %s' % prectype[prechead[0]])
print(' Resume Count : %u' % \
record[1])
print(' FullResume : %u ns' % \
record[2])
print(' AverageResume : %u ns' % \
record[3])
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
print(' %s' % prectype[prechead[0]])
print(' SuspendStart : %u ns' % \
record[0])
print(' SuspendEnd : %u ns' % \
record[1])
print(' SuspendTime : %u ns' % \
fwData[0])
j += prechead[1]
if(output):
print('')
i += header[1]
fp.close()
return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck(probecheck=False):
status = True
print('Checking this system (%s)...' % platform.node())
# check we have root access
res = sysvals.colorText('NO (No features of this tool will work!)')
if(sysvals.rootCheck(False)):
res = 'YES'
print(' have root access: %s' % res)
if(res != 'YES'):
print(' Try running this script with sudo')
return False
# check sysfs is mounted
res = sysvals.colorText('NO (No features of this tool will work!)')
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
print(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return False
# check target mode is a valid mode
if sysvals.suspendmode != 'command':
res = sysvals.colorText('NO')
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = False
print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
print(' valid power modes are: %s' % modes)
print(' please choose one with -m')
# check if ftrace is available
res = sysvals.colorText('NO')
ftgood = sysvals.verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = False
print(' is ftrace supported: %s' % res)
# check if kprobes are available
res = sysvals.colorText('NO')
sysvals.usekprobes = sysvals.verifyKprobes()
if(sysvals.usekprobes):
res = 'YES'
else:
sysvals.usedevsrc = False
print(' are kprobes supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
check = False
if(os.path.exists(sysvals.epath+e)):
check = True
if(not check):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and check):
sysvals.usetraceevents = True
if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
res = 'FTRACE (all trace events found)'
elif(sysvals.usetraceevents):
res = 'DMESG and FTRACE (suspend_resume trace event found)'
print(' timeline data source: %s' % res)
# check if rtcwake
res = sysvals.colorText('NO')
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = False
print(' is rtcwake supported: %s' % res)
if not probecheck:
return status
# verify kprobes
if sysvals.usekprobes:
for name in sysvals.tracefuncs:
sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
if sysvals.usedevsrc:
for name in sysvals.dev_tracefuncs:
sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
sysvals.addKprobes(True)
return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
	if help:
		printHelp()
	print('ERROR: %s\n' % msg)
	sys.exit()
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
if main:
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
if main:
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = float(arg)
except:
doError(name+': non-numerical value given', True)
if(val < min or val > max):
doError(name+': value should be between %f and %f' % (min, max), True)
return val
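# Function: processData
# Description:
#	 parse the gathered ftrace/dmesg data, extract any error info, and
#	 generate the html timeline output
# Output:
#	 The list of parsed test runs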
def processData():
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
testruns = parseTraceLog()
if sysvals.dmesgfile:
dmesgtext = loadKernelLog(True)
for data in testruns:
data.extractErrorInfo(dmesgtext)
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
return testruns
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
if sysvals.ftracefile:
doesTraceLogHaveTraceEvents()
if not sysvals.dmesgfile and not sysvals.usetraceeventsonly:
doError('recreating this html output requires a dmesg file')
sysvals.setOutputFile()
vprint('Output file: %s' % sysvals.htmlfile)
if os.path.exists(sysvals.htmlfile):
if not os.path.isfile(sysvals.htmlfile):
doError('a directory already exists with this name: %s' % sysvals.htmlfile)
elif not os.access(sysvals.htmlfile, os.W_OK):
doError('missing permission to write to %s' % sysvals.htmlfile)
return processData()
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest():
# prepare for the test
sysvals.initFtrace()
sysvals.initTestOutput('suspend')
vprint('Output files:\n\t%s\n\t%s\n\t%s' % \
(sysvals.dmesgfile, sysvals.ftracefile, sysvals.htmlfile))
# execute the test
executeSuspend()
sysvals.cleanupFtrace()
processData()
# if running as root, change output dir owner to sudo_user
if os.path.isdir(sysvals.testdir) and os.getuid() == 0 and \
'SUDO_USER' in os.environ:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
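# Function: find_in_html
# Description:
#	 search html for the first occurrence of any string in strs; return
#	 the first number that follows it, or the enclosing div text when
#	 div=True (empty string if nothing is found)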
def find_in_html(html, strs, div=False):
	for s in strs:
		l = len(s)
		i = html.find(s)
		if i >= 0:
			break
	if i < 0:
		return ''
	if not div:
		m = re.search(r'[-+]?\d*\.\d+|\d+', html[i+l:i+l+50])
		return m.group() if m else ''
n = html[i+l:].find('</div>')
if n < 0:
return ''
return html[i+l:i+l+n]
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True):
inpath = os.path.abspath(subdir)
outpath = inpath
if local:
outpath = os.path.abspath('.')
print('Generating a summary of folder "%s"' % inpath)
testruns = []
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
			if(not re.match(r'.*\.html', filename)):
continue
file = os.path.join(dirname, filename)
html = open(file, 'r').read(10000)
suspend = find_in_html(html,
['Kernel Suspend: ', 'Kernel Suspend Time: '])
resume = find_in_html(html,
['Kernel Resume: ', 'Kernel Resume Time: '])
line = find_in_html(html, ['<div class="stamp">'], True)
stmp = line.split()
if not suspend or not resume or len(stmp) < 4:
continue
data = {
'host': stmp[0],
'kernel': stmp[1],
'mode': stmp[2],
				'time': ' '.join(stmp[3:]),
'suspend': suspend,
'resume': resume,
'url': os.path.relpath(file, outpath),
}
if len(stmp) == 7:
data['kernel'] = 'unknown'
data['mode'] = stmp[1]
				data['time'] = ' '.join(stmp[2:])
testruns.append(data)
outfile = os.path.join(outpath, 'summary.html')
print('Summary file: %s' % outfile)
createHTMLSummarySimple(testruns, outfile, inpath)
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(value):
yes = ['1', 'true', 'yes', 'on']
if value.lower() in yes:
return True
return False
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
Config = ConfigParser.ConfigParser()
Config.read(file)
sections = Config.sections()
overridekprobes = False
overridedevkprobes = False
if 'Settings' in sections:
for opt in Config.options('Settings'):
value = Config.get('Settings', opt).lower()
if(opt.lower() == 'verbose'):
sysvals.verbose = checkArgBool(value)
elif(opt.lower() == 'addlogs'):
sysvals.dmesglog = sysvals.ftracelog = checkArgBool(value)
elif(opt.lower() == 'dev'):
sysvals.usedevsrc = checkArgBool(value)
elif(opt.lower() == 'proc'):
sysvals.useprocmon = checkArgBool(value)
elif(opt.lower() == 'x2'):
if checkArgBool(value):
sysvals.execcount = 2
elif(opt.lower() == 'callgraph'):
sysvals.usecallgraph = checkArgBool(value)
elif(opt.lower() == 'override-timeline-functions'):
overridekprobes = checkArgBool(value)
elif(opt.lower() == 'override-dev-timeline-functions'):
overridedevkprobes = checkArgBool(value)
elif(opt.lower() == 'devicefilter'):
sysvals.setDeviceFilter(value)
elif(opt.lower() == 'expandcg'):
sysvals.cgexp = checkArgBool(value)
elif(opt.lower() == 'srgap'):
if checkArgBool(value):
sysvals.srgap = 5
elif(opt.lower() == 'mode'):
sysvals.suspendmode = value
elif(opt.lower() == 'command'):
sysvals.testcommand = value
elif(opt.lower() == 'x2delay'):
sysvals.x2delay = getArgInt('-x2delay', value, 0, 60000, False)
elif(opt.lower() == 'predelay'):
sysvals.predelay = getArgInt('-predelay', value, 0, 60000, False)
elif(opt.lower() == 'postdelay'):
sysvals.postdelay = getArgInt('-postdelay', value, 0, 60000, False)
elif(opt.lower() == 'maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', value, 0, 1000, False)
elif(opt.lower() == 'rtcwake'):
if value.lower() == 'off':
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', value, 0, 3600, False)
elif(opt.lower() == 'timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', value, 0, 6, False))
elif(opt.lower() == 'mindev'):
sysvals.mindevlen = getArgFloat('-mindev', value, 0.0, 10000.0, False)
elif(opt.lower() == 'callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', value, 0.0, 1.0, False)
			elif(opt.lower() == 'callloop-maxlen'):
				sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', value, 0.0, 1.0, False)
elif(opt.lower() == 'mincg'):
sysvals.mincglen = getArgFloat('-mincg', value, 0.0, 10000.0, False)
elif(opt.lower() == 'output-dir'):
sysvals.testdir = sysvals.setOutputFolder(value)
if sysvals.suspendmode == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"')
# compatibility errors
if sysvals.usedevsrc and sysvals.usecallgraph:
doError('-dev is not compatible with -f')
if sysvals.usecallgraph and sysvals.useprocmon:
doError('-proc is not compatible with -f')
if overridekprobes:
sysvals.tracefuncs = dict()
if overridedevkprobes:
sysvals.dev_tracefuncs = dict()
kprobes = dict()
kprobesec = 'dev_timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
text = Config.get(kprobesec, name)
kprobes[name] = (text, True)
kprobesec = 'timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
if name in kprobes:
doError('Duplicate timeline function found "%s"' % (name))
text = Config.get(kprobesec, name)
kprobes[name] = (text, False)
for name in kprobes:
function = name
format = name
color = ''
args = dict()
text, dev = kprobes[name]
data = text.split()
i = 0
for val in data:
			# bracketed strings are special formatting, read them separately
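			# e.g. a (hypothetical) entry "msleep_time={time} time=%di [color=red]"
			# yields format "msleep_time={time}", kprobe arg time=%di, and a color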
if val[0] == '[' and val[-1] == ']':
for prop in val[1:-1].split(','):
p = prop.split('=')
if p[0] == 'color':
try:
color = int(p[1], 16)
color = '#'+p[1]
except:
color = p[1]
continue
# first real arg should be the format string
if i == 0:
format = val
# all other args are actual function args
else:
d = val.split('=')
args[d[0]] = d[1]
i += 1
if not function or not format:
doError('Invalid kprobe: %s' % name)
	for arg in re.findall(r'{(?P<n>[a-zA-Z0-9]*)}', format):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
doError('Duplicate timeline function found "%s"' % (name))
kp = {
'name': name,
'func': function,
'format': format,
sysvals.archargs: args
}
if color:
kp['color'] = color
if dev:
sysvals.dev_tracefuncs[name] = kp
else:
sysvals.tracefuncs[name] = kp
# Function: printHelp
# Description:
# print out the help text
def printHelp():
print('')
print('%s v%s' % (sysvals.title, sysvals.version))
print('Usage: sudo sleepgraph <options> <commands>')
print('')
print('Description:')
print(' This tool is designed to assist kernel and OS developers in optimizing')
print(' their linux stack\'s suspend/resume time. Using a kernel image built')
print(' with a few extra options enabled, the tool will execute a suspend and')
print(' capture dmesg and ftrace data until resume is complete. This data is')
print(' transformed into a device timeline and an optional callgraph to give')
print(' a detailed view of which devices/subsystems are taking the most')
print(' time in suspend/resume.')
print('')
print(' If no specific command is given, the default behavior is to initiate')
print(' a suspend/resume and capture the dmesg/ftrace output as an html timeline.')
print('')
print(' Generates output files in subdirectory: suspend-yymmdd-HHMMSS')
print(' HTML output: <hostname>_<mode>.html')
print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
print('')
print('Options:')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -config fn Pull arguments and config options from file fn')
print(' -verbose Print extra information during execution and analysis')
	print(' -m mode Mode to initiate for suspend (default: %s)' % sysvals.suspendmode)
print(' -o name Overrides the output subdirectory name when running a new test')
print(' default: suspend-{date}-{time}')
print(' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)')
print(' -addlogs Add the dmesg and ftrace logs to the html output')
print(' -srgap Add a visible gap in the timeline between sus/res (default: disabled)')
print(' [advanced]')
print(' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"')
print(' -proc Add usermode process info into the timeline (default: disabled)')
print(' -dev Add kernel function calls and threads to the timeline (default: disabled)')
print(' -x2 Run two suspend/resumes back to back (default: disabled)')
print(' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)')
print(' -predelay t Include t ms delay before 1st suspend (default: 0 ms)')
print(' -postdelay t Include t ms delay after last resume (default: 0 ms)')
print(' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
print(' be created in a new subdirectory with a summary page.')
print(' [debug]')
print(' -f Use ftrace to create device callgraphs (default: disabled)')
print(' -maxdepth N limit the callgraph data to N call levels (default: 0=all)')
print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
print(' -fadd file Add functions to be graphed in the timeline from a list in a text file')
print(' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names')
print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)')
print(' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)')
print(' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)')
print('')
print('Other commands:')
print(' -modes List available suspend modes')
print(' -status Test to see if the system is enabled to run this tool')
print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
print(' -sysinfo Print out system info extracted from BIOS')
print(' -usbtopo Print out the current USB topology with power info')
print(' -usbauto Enable autosuspend for all connected USB devices')
print(' -flist Print the list of functions currently being captured in ftrace')
print(' -flistall Print all functions capable of being captured in ftrace')
	print(' -summary directory Create a summary of all tests in this dir')
print(' [redo]')
print(' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)')
print(' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
cmd = ''
outdir = ''
multitest = {'run': False, 'count': 0, 'delay': 0}
simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall', '-usbtopo', '-usbauto', '-status']
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = args.next()
except:
doError('No mode supplied', True)
if val == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"', True)
sysvals.suspendmode = val
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-h'):
printHelp()
sys.exit()
elif(arg == '-v'):
print("Version %s" % sysvals.version)
sys.exit()
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-predelay'):
sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
elif(arg == '-postdelay'):
sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-addlogs'):
sysvals.dmesglog = sysvals.ftracelog = True
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-proc'):
sysvals.useprocmon = True
elif(arg == '-dev'):
sysvals.usedevsrc = True
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-rtcwake'):
try:
val = args.next()
except:
doError('No rtcwake time supplied', True)
if val.lower() == 'off':
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
elif(arg == '-timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
elif(arg == '-mindev'):
sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
elif(arg == '-mincg'):
sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-cgtest'):
sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
elif(arg == '-cgphase'):
try:
val = args.next()
except:
doError('No phase name supplied', True)
d = Data(0)
if val not in d.phases:
				doError('Invalid phase, valid phases are %s' % d.phases, True)
sysvals.cgphase = val
elif(arg == '-callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
elif(arg == '-callloop-maxlen'):
sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
elif(arg == '-cmd'):
try:
val = args.next()
except:
doError('No command string supplied', True)
sysvals.testcommand = val
sysvals.suspendmode = 'command'
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-srgap'):
sysvals.srgap = 5
elif(arg == '-multi'):
multitest['run'] = True
multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
elif(arg == '-o'):
try:
val = args.next()
except:
doError('No subdirectory name supplied', True)
outdir = sysvals.setOutputFolder(val)
elif(arg == '-config'):
try:
val = args.next()
except:
doError('No text file supplied', True)
		if not os.path.exists(val):
doError('%s does not exist' % val)
configFromFile(val)
elif(arg == '-fadd'):
try:
val = args.next()
except:
doError('No text file supplied', True)
		if not os.path.exists(val):
doError('%s does not exist' % val)
sysvals.addFtraceFilterFunctions(val)
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
		if not os.path.exists(sysvals.dmesgfile):
doError('%s does not exist' % sysvals.dmesgfile)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.ftracefile = val
		if not os.path.exists(sysvals.ftracefile):
doError('%s does not exist' % sysvals.ftracefile)
elif(arg == '-summary'):
try:
val = args.next()
except:
doError('No directory supplied', True)
cmd = 'summary'
outdir = val
sysvals.notestrun = True
		if not os.path.isdir(val):
			doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = args.next()
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
else:
doError('Invalid argument: '+arg, True)
# compatibility errors
if(sysvals.usecallgraph and sysvals.usedevsrc):
doError('-dev is not compatible with -f')
if(sysvals.usecallgraph and sysvals.useprocmon):
doError('-proc is not compatible with -f')
# callgraph size cannot exceed device size
if sysvals.mincglen < sysvals.mindevlen:
sysvals.mincglen = sysvals.mindevlen
# just run a utility command and exit
sysvals.cpuInfo()
if(cmd != ''):
if(cmd == 'status'):
statusCheck(True)
elif(cmd == 'fpdt'):
getFPDT(True)
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo()
elif(cmd == 'usbtopo'):
detectUSB()
elif(cmd == 'modes'):
			print(getModes())
elif(cmd == 'flist'):
sysvals.getFtraceFilterFunctions(True)
elif(cmd == 'flistall'):
sysvals.getFtraceFilterFunctions(False)
elif(cmd == 'usbauto'):
setUSBDevicesAuto()
elif(cmd == 'summary'):
runSummary(outdir, True)
sys.exit()
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
rerunTest()
sys.exit()
# verify that we can run a test
if(not statusCheck()):
print('Check FAILED, aborting the test run!')
sys.exit()
# extract mem modes and convert
mode = sysvals.suspendmode
if 'mem' == mode[:3]:
if '-' in mode:
memmode = mode.split('-')[-1]
else:
memmode = 'deep'
if memmode == 'shallow':
mode = 'standby'
elif memmode == 's2idle':
mode = 'freeze'
else:
mode = 'mem'
sysvals.memmode = memmode
sysvals.suspendmode = mode
sysvals.systemInfo(dmidecode(sysvals.mempath))
if multitest['run']:
# run multiple tests in a separate subdirectory
if not outdir:
s = 'suspend-x%d' % multitest['count']
outdir = datetime.now().strftime(s+'-%y%m%d-%H%M%S')
if not os.path.isdir(outdir):
os.mkdir(outdir)
for i in range(multitest['count']):
if(i != 0):
print('Waiting %d seconds...' % (multitest['delay']))
time.sleep(multitest['delay'])
print('TEST (%d/%d) START' % (i+1, multitest['count']))
fmt = 'suspend-%y%m%d-%H%M%S'
sysvals.testdir = os.path.join(outdir, datetime.now().strftime(fmt))
runTest()
print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
runSummary(outdir, False)
else:
if outdir:
sysvals.testdir = outdir
# run the test in the current directory
runTest()
| gpl-2.0 |
lisa-lab/pylearn2 | pylearn2/scripts/papers/jia_huang_wkshp_11/evaluate.py | 44 | 3208 | from __future__ import print_function
from optparse import OptionParser
import warnings
try:
from sklearn.metrics import classification_report
except ImportError:
classification_report = None
warnings.warn("couldn't find sklearn.metrics.classification_report")
try:
from sklearn.metrics import confusion_matrix
except ImportError:
confusion_matrix = None
    warnings.warn("couldn't find sklearn.metrics.confusion_matrix")
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import numpy as np
def test(model, X, y):
print("Evaluating svm")
y_pred = model.predict(X)
    acc = (y == y_pred).mean()
    print("Accuracy ", acc)
def get_test_labels(cifar10, cifar100, stl10):
assert cifar10 + cifar100 + stl10 == 1
if stl10:
print('loading entire stl-10 test set just to get the labels')
stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/test.pkl")
return stl10.y
if cifar10:
print('loading entire cifar10 test set just to get the labels')
cifar10 = CIFAR10(which_set = 'test')
return np.asarray(cifar10.y)
if cifar100:
print('loading entire cifar100 test set just to get the fine labels')
cifar100 = CIFAR100(which_set = 'test')
return np.asarray(cifar100.y_fine)
assert False
def main(model_path,
test_path,
dataset,
**kwargs):
model = serial.load(model_path)
cifar100 = dataset == 'cifar100'
cifar10 = dataset == 'cifar10'
stl10 = dataset == 'stl10'
assert cifar10 + cifar100 + stl10 == 1
y = get_test_labels(cifar10, cifar100, stl10)
X = get_features(test_path, False, False)
if stl10:
num_examples = 8000
if cifar10 or cifar100:
num_examples = 10000
if not X.shape[0] == num_examples:
raise AssertionError('Expected %d examples but got %d' % (num_examples, X.shape[0]))
assert y.shape[0] == num_examples
test(model,X,y)
if __name__ == '__main__':
"""
Useful for quick tests.
    Usage: python evaluate.py
"""
parser = OptionParser()
parser.add_option("-m", "--model",
action="store", type="string", dest="model_path")
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-o", action="store", dest="output", default = None, help="path to write the report to")
parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
#(options, args) = parser.parse_args()
#assert options.output
main(model_path='final_model.pkl',
test_path='test_features.npy',
dataset = 'cifar100',
)
| bsd-3-clause |
OFAI/hub-toolbox-python3 | setup.py | 1 | 3998 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of the HUB TOOLBOX available at
https://github.com/OFAI/hub-toolbox-python3/
The HUB TOOLBOX is licensed under the terms of the GNU GPLv3.
(c) 2011-2018, Dominik Schnitzer and Roman Feldbauer
Austrian Research Institute for Artificial Intelligence (OFAI)
Contact: <roman.feldbauer@ofai.at>
Installation:
-------------
In the console (terminal application) change to the folder containing this file.
To build the package hub_toolbox:
python3 setup.py build
To install the package (with administrator rights):
sudo python3 setup.py install
To test the installation:
sudo python3 setup.py test
If this succeeds with an 'OK' message, you are ready to go.
Otherwise you may consider filing a bug report on github.
(Some skipped tests are perfectly fine, though.)
"""
import re, os, sys
REQ_MAJOR = 3
REQ_MINOR = 6
if sys.version_info < (REQ_MAJOR, REQ_MINOR):
    sys.stdout.write(
        (f"The HUB TOOLBOX requires Python {REQ_MAJOR}.{REQ_MINOR} or higher."
         f"\nPlease re-run as 'python3 setup.py' or update your Python "
         f"environment.\nConsider using Anaconda for easy package handling.\n"))
sys.exit(1)
try:
import numpy, scipy, sklearn # @UnusedImport
except ImportError:
sys.stdout.write("The HUB TOOLBOX requires numpy, scipy and scikit-learn. "
"Please make sure these packages are available locally. "
"Consider using Anaconda for easy package handling.\n")
try:
import pandas, joblib # @UnusedImport
except ImportError:
sys.stdout.write("Some modules of the HUB TOOLBOX require pandas and joblib. "
"Please make sure these packages are available locally. "
"Consider using Anaconda for easy package handling.\n")
try:
import nmslib, falconn # @UnusedImport
except ImportError:
sys.stdout.write("The 'approximate' module uses 'nmslib' and 'falconn' "
"libraries for approximate nearest neighbor search. "
"Please make sure these packages are available locally. "
"Consider using Anaconda for easy package handling.\n")
setup_options = {}
try:
from setuptools import setup
setup_options['test_suite'] = 'tests'
except ImportError:
from distutils.core import setup
import warnings
warnings.warn("setuptools not found, resorting to distutils. "
"Unit tests won't be discovered automatically.")
# Parsing current version number
# Adapted from the Lasagne project at
# https://github.com/Lasagne/Lasagne/blob/master/setup.py
here = os.path.abspath(os.path.dirname(__file__))
try:
# obtain version string from __init__.py
# Read ASCII file with builtin open() so __version__ is str in Python 2 and 3
with open(os.path.join(here, 'hub_toolbox', '__init__.py'), 'r') as f:
init_py = f.read()
version = re.search("__version__ = '(.*)'", init_py).groups()[0]
except Exception:
version = ''
setup(
name = "hub_toolbox",
version = version,
author = "Roman Feldbauer",
author_email = "roman.feldbauer@ofai.at",
maintainer = "Roman Feldbauer",
maintainer_email = "roman.feldbauer@ofai.at",
description = "Hubness reduction and analysis tools",
license = "GNU GPLv3",
keywords = ["machine learning", "data science"],
url = "https://github.com/OFAI/hub-toolbox-python3",
packages=['hub_toolbox', 'tests'],
package_data={'hub_toolbox': ['example_datasets/*']},
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 "
"or later (GPLv3+)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering"
],
**setup_options
)
| gpl-3.0 |
pazeshun/jsk_apc | demos/instance_occlsegm/ros/synthetic2d/nodes/place_planning.py | 2 | 6463 | #!/usr/bin/env python
# flake8: noqa
import numpy as np
def get_place_mask(img, bboxes, labels, masks, obstacles, target_id, pick, debug=0, n_times=10, mask_fg=None):
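    # Plan a collision-free placement for the picked object `pick`:
    #   1. render the instance masks into label images (with and without
    #      the picked object)
    #   2. over-segment the scene with SLIC and keep only superpixels that
    #      lie entirely in free space (optionally limited to mask_fg)
    #   3. walk the n_times superpixels closest to the object's current
    #      center of mass, trying 8 rotations of the object mask, and
    #      accept a pose whose slightly enlarged mask overlaps less than
    #      5% with the non-placeable area
    # Returns the binary placement mask, or None if no pose is found.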
import chainer_mask_rcnn as cmr
import instance_occlsegm_lib
from skimage.segmentation import slic
from scipy.ndimage import center_of_mass
from skimage.transform import rotate
from skimage.transform import rescale
lbl_ins, _ = cmr.utils.instance_boxes2label(labels + 1, bboxes, masks=np.isin(masks, (1,)))
lbl_ins2, _ = cmr.utils.instance_boxes2label(
labels[np.arange(len(labels)) != pick] + 1,
bboxes[np.arange(len(labels)) != pick],
masks=np.isin(masks, (1, 2)))
# objects_in_graph = [o + 1 for o in objects_in_graph]
mask = lbl_ins == pick
cy, cx = center_of_mass(mask)
xmin, ymin, xmax, ymax = instance_occlsegm_lib.image.mask_to_bbox(mask)
mask_obj = mask[ymin:ymax, xmin:xmax]
# plt.imshow(mask_obj, cmap='gray')
# plt.show()
segments = slic(lbl_ins, n_segments=50)
mask_placable = lbl_ins == -1
if mask_fg is not None:
mask_placable = np.bitwise_and(mask_placable, mask_fg)
# plt.imshow(lbl_cls2)
# plt.imshow(mask_placable)
# plt.show()
disabled = np.unique(segments[~mask_placable])
for s in disabled:
segments[segments == s] = -1
# plt.imshow(segments)
# plt.imshow(mask_placable, cmap='gray')
# plt.show()
distances = []
for s in np.unique(segments):
if s == -1:
continue
mask_s = segments == s
cy_s, cx_s = center_of_mass(mask_s)
d = np.sqrt((cx - cx_s) ** 2 + (cy - cy_s) ** 2)
distances.append((s, d))
distances = sorted(distances, key=lambda x: x[1])
for l, d in distances[:n_times]:
R = 8
for r in range(0, R):
mask_obj_r0 = rotate(mask_obj, resize=True, angle=r * (360. / R), order=0)
# mask_obj_r1 = rescale(mask_obj_r0, 1.5, mode='constant', multichannel=False, anti_aliasing=False)
mask_obj_r1 = rescale(mask_obj_r0, 1.1, mode='constant')
mask_obj_r1 = mask_obj_r1 >= 0.5
# plt.subplot(121)
# plt.imshow(mask_obj_r0)
# plt.subplot(122)
# plt.imshow(mask_obj_r1)
# plt.show()
H, W = mask.shape[:2]
mask_s = segments == l
cy_s, cx_s = center_of_mass(mask_s)
def get_mask_t(mask_obj_r):
h, w = mask_obj_r.shape[:2]
cy_o, cx_o = center_of_mass(mask_obj_r)
dymax = mask_obj_r.shape[0] - cy_o
dymin = 0 - cy_o
dxmax = mask_obj_r.shape[1] - cx_o
dxmin = 0 - cx_o
ymax_t = int(cy_s + dymax)
ymin_t = ymax_t - h
# ymin_t = int(cy_s + dymin)
xmax_t = int(cx_s + dxmax)
xmin_t = xmax_t - w
# xmin_t = int(cx_s + dxmin)
if not (0 <= ymax_t <= H and 0 <= ymin_t <= H and
0 <= xmax_t <= W and 0 <= xmin_t <= W):
return None
mask_t = np.zeros_like(mask)
mask_t[ymin_t:ymax_t, xmin_t:xmax_t] = mask_obj_r
return mask_t
mask_t1 = get_mask_t(mask_obj_r1)
mask_t0 = get_mask_t(mask_obj_r0)
if mask_t0 is None or mask_t1 is None:
continue
# instance_occlsegm_lib.io.tileimg([
# mask_t1,
# mask_placable,
# np.bitwise_or(mask_t1, mask_placable),
# np.bitwise_and(mask_t1, ~mask_placable)
# ])
# instance_occlsegm_lib.io.show()
if 1. * np.sum(mask_t1 & ~mask_placable) / mask_t1.sum() < 0.05:
if debug:
instance_occlsegm_lib.io.tileimg([
img,
mask,
mask_placable,
np.bitwise_or(mask_t1, ~mask_placable),
np.bitwise_or(mask_t0, ~mask_placable),
mask_t0
])
instance_occlsegm_lib.io.show()
return mask_t0
# plt.imshow(segments)
# plt.plot([cx_s], [cy_s], 'o', color='r')
# plt.show()
def main():
data = np.load('book_and_tennis_ball.npz')
img = data['img']
bboxes = data['bboxes']
labels = data['labels']
masks = data['masks']
objects_in_graph = [34, 7]
target = 34
get_place_mask(img, bboxes, labels, masks, objects_in_graph, target, debug=1)
if __name__ == '__main__':
main()
# def apply_pca(mask):
# from sklearn.decomposition import PCA
#
# pca = PCA()
# xy = np.argwhere(mask)
# pca.fit(xy)
# xy_trans = pca.fit_transform(xy)
# cy, cx = pca.mean_
#
# axis0_min = xy_trans[:, 0].min()
# axis0_max = xy_trans[:, 0].max()
# axis1_min = xy_trans[:, 1].min()
# axis1_max = xy_trans[:, 1].max()
#
# yx0_max = axis0_max * pca.components_[0] + pca.mean_
# yx0_min = axis0_min * pca.components_[0] + pca.mean_
# yx1_max = axis1_max * pca.components_[1] + pca.mean_
# yx1_min = axis1_min * pca.components_[1] + pca.mean_
#
# # visualize
# viz = img.copy()
# cv2.circle(viz, (int(cx), int(cy)), radius=5, color=(0, 255, 0), thickness=-1)
# # long axis
# cv2.line(viz, (int(yx0_min[1]), int(yx0_min[0])), (int(yx0_max[1]), int(yx0_max[0])), color=(0, 255, 0), thickness=1)
# cv2.circle(viz, (int(yx0_max[1]), int(yx0_max[0])), radius=5, color=(0, 0, 255), thickness=-1)
# cv2.circle(viz, (int(yx0_min[1]), int(yx0_min[0])), radius=5, color=(255, 0, 0), thickness=-1)
# # short axis
# cv2.line(viz, (int(yx1_min[1]), int(yx1_min[0])), (int(yx1_max[1]), int(yx1_max[0])), color=(0, 255, 0), thickness=1)
# cv2.circle(viz, (int(yx1_max[1]), int(yx1_max[0])), radius=5, color=(0, 0, 255), thickness=-1)
# cv2.circle(viz, (int(yx1_min[1]), int(yx1_min[0])), radius=5, color=(255, 0, 0), thickness=-1)
# plt.imshow(viz)
# plt.show()
#
# viz = img.copy()
# mask_flat = mask.flatten()
# index = np.random.choice(np.argwhere(~mask_flat)[:, 0])
# x = index % mask.shape[1]
# y = index // mask.shape[1]
# cv2.circle(viz, (x, y), radius=5, color=(0, 255, 0), thickness=-1)
# plt.imshow(viz)
# plt.show()
| bsd-3-clause |
jcrudy/sklearntools | sklearntools/test/validation_plots.py | 1 | 1112 | from sklearntools.validation import plot_tolerance, plot_roc,\
calibration_bin_plot, plot_roc_auc_for_bins
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    from scipy.special import expit
import numpy as np
from matplotlib import pyplot
np.random.seed(1)
m = 1000
n = 5
X = np.random.normal(size=(m,n))
beta = np.random.normal(size=n)
y = np.random.binomial(n=1, p=expit(np.dot(X, beta)))
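    # y is sampled from a logistic model of X, so the fitted
    # LogisticRegression below should be reasonably well calibrated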
model = LogisticRegression().fit(X, y)
pred = model.predict_proba(X)[:,1]
pyplot.figure()
plot_roc(y, pred, name='test_model')
pyplot.savefig('test_roc_plot.png')
pyplot.figure()
plot_tolerance(y, pred, name='test_model', normalize=True)
pyplot.savefig('test_tolerance_plot.png')
pyplot.figure()
    calibration_bin_plot(pred, y, pred)
pyplot.savefig('test_calibration_plot.png')
pyplot.figure()
plot_roc_auc_for_bins(10, X[:,0], y, pred)
pyplot.savefig('test_bin_auc_plot.png') | bsd-3-clause |
nansencenter/nansat | nansat/utils.py | 1 | 9214 | # Name: utils.py
# Purpose: collection of data and funcs used in NANSAT modules
# Authors: Asuka Yamakawa, Anton Korosov, Knut-Frode Dagestad,
# Morten W. Hansen, Alexander Myasoyedov,
# Dmitry Petrenko, Evgeny Morozov
# Created: 29.06.2011
# Copyright: (c) NERSC 2011 - 2013
# Licence:
# This file is part of NANSAT.
# NANSAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# http://www.gnu.org/licenses/gpl-3.0.html
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# but WITHOUT ANY WARRANTY; without even the implied warranty of
from __future__ import absolute_import
import os
import warnings
import logging
from dateutil.parser import parse
try:
import matplotlib
except ImportError:
MATPLOTLIB_IS_INSTALLED = False
else:
MATPLOTLIB_IS_INSTALLED = True
if 'DISPLAY' not in os.environ:
matplotlib.use('Agg')
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import hex2color
from matplotlib import cm
import numpy as np
try:
import gdal, ogr, osr
except ImportError:
from osgeo import gdal, ogr, osr
gdal.UseExceptions()
NUMPY_TO_GDAL_TYPE_MAP = {
'uint8': 1,
'int8': 1,
'uint16': 2,
'int16': 3,
'uint32': 4,
'int32': 5,
'float32': 6,
'float64': 7,
'complex64': 10,
'complex128': 11
}
def remove_keys(dict, keys):
if keys is None:
keys = []
for key in keys:
dict.pop(key, None)
return dict
def register_colormaps():
''' Create custom colormaps and register them '''
obpg = {'red': [(0.00, 0.56, 0.56),
(0.19, 0.00, 0.00),
(0.38, 0.00, 0.00),
(0.50, 0.00, 0.00),
(0.63, 1.00, 1.00),
(0.88, 1.00, 1.00),
(1.00, 0.40, 0.40)],
'green': [(0.00, 0.00, 0.00),
(0.19, 0.00, 0.00),
(0.38, 1.00, 1.00),
(0.50, 1.00, 1.00),
(0.63, 1.00, 1.00),
(0.88, 0.00, 0.00),
(1.00, 0.00, 0.00)],
'blue': [(0.00, 0.43, 0.43),
(0.19, 1.00, 1.00),
(0.38, 1.00, 1.00),
(0.50, 0.00, 0.00),
(0.63, 0.00, 0.00),
(0.88, 0.00, 0.00),
(1.00, 0.00, 0.00)],
}
ak01 = {'red': [(0, 0.1, 0.1,),
(0.1, 0.56, 0.56,),
(0.22, 0, 0,),
(0.27, 0, 0,),
(0.37, 0.3, 0.3,),
(0.47, 0, 0,),
(0.52, 0, 0,),
(0.64, 1, 1,),
(0.76, 1, 1,),
(0.88, 0.4, 0.4,),
(1, 1, 1,)],
'green': [(0, 0, 0,),
(0.1, 0, 0,),
(0.22, 0, 0,),
(0.27, 0, 0,),
(0.37, 0.6, 0.6,),
(0.47, 0.6, 0.6,),
(0.52, 1, 1,),
(0.64, 1, 1,),
(0.76, 0, 0,),
(0.88, 0, 0,),
(1, 0.5, 0.5,)],
'blue': [(0, 0.1, 0.1,),
(0.1, 0.5, 0.5,),
(0.22, 0.5, 0.5,),
(0.27, 1, 1,),
(0.37, 1, 1,),
(0.47, 0, 0,),
(0.52, 0, 0,),
(0.64, 0, 0,),
(0.76, 0, 0,),
(0.88, 0, 0,),
(1, 0.5, 0.5,)],
}
if MATPLOTLIB_IS_INSTALLED:
cm.register_cmap(cmap=LinearSegmentedColormap('obpg', obpg, 256))
cm.register_cmap(cmap=LinearSegmentedColormap('ak01', ak01, 256))
def initial_bearing(lon1, lat1, lon2, lat2):
"""Initial bearing when traversing from point1 (lon1, lat1)
to point2 (lon2, lat2)
See http://www.movable-type.co.uk/scripts/latlong.html
Parameters
----------
lon1, lat1 : float
longitude and latitude of start point
lon2, lat2 : float
longitude and latitude of end point
Returns
-------
initial_bearing : float
The initial bearing (azimuth direction) when heading out
from the start point towards the end point along a great circle.
"""
rlon1 = np.radians(lon1)
rlat1 = np.radians(lat1)
rlon2 = np.radians(lon2)
rlat2 = np.radians(lat2)
bearing = np.arctan2(np.sin(rlon2 - rlon1) * np.cos(rlat2),
np.cos(rlat1) * np.sin(rlat2) -
np.sin(rlat1) * np.cos(rlat2) *
np.cos(rlon2 - rlon1))
return np.mod(np.degrees(bearing) + 360, 360)
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the spherical earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(np.sqrt(a))
distance_meters = 6367000 * c
return distance_meters
def add_logger(logName='', logLevel=None):
""" Creates and returns logger with default formatting for Nansat
Parameters
-----------
logName : string, optional
Name of the logger
Returns
--------
logging.logger
See also
--------
`<http://docs.python.org/howto/logging.html>`_
"""
if logLevel is not None:
os.environ['LOG_LEVEL'] = str(logLevel)
# create (or take already existing) logger
# with default logging level WARNING
logger = logging.getLogger(logName)
    logger.setLevel(int(os.environ.get('LOG_LEVEL', str(logging.WARNING))))
# if logger already exits, default stream handler has been already added
# otherwise create and add a new handler
if len(logger.handlers) == 0:
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(asctime)s|%(levelno)s|%(module)s|'
'%(funcName)s|%(message)s',
datefmt='%I:%M:%S')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
    logger.handlers[0].setLevel(int(os.environ.get('LOG_LEVEL', str(logging.WARNING))))
return logger
def get_random_color(c0=None, minDist=100, low=0, high=255):
"""Create random color which is far enough from the input color
Parameters
----------
c0 : str
        hexadecimal representation of the color (e.g. '#ff0000' for red)
minDist : int
minimal distance to input color
Returns
-------
c0 : str
        hexadecimal representation of the new random color
"""
if not MATPLOTLIB_IS_INSTALLED:
raise ImportError('Matplotlib is not installed')
# check inputs
if c0 is None:
c0 = '#000000'
# convert input color to tuple of R,G,B
c0rgb = np.array(hex2color(c0))
# create new random color
c1rgb = np.array([np.random.randint(low, high),
np.random.randint(low, high),
np.random.randint(low, high)])
# calculate distance
d = np.sum((c0rgb - c1rgb)**2)**0.5
# if distance is small, create new random color
if d < minDist:
c1 = get_random_color(c0, minDist)
else:
# convert to HEX code
c1 = '#%02x%02x%02x' % tuple(c1rgb)
return c1
def parse_time(time_string):
''' Parse time string accounting for possible wrong formatting
Parameters
----------
time_string : str
string with date and time
Returns
-------
time_value : datetime object
'''
time_string = time_string.strip()
    # To account for datasets in the format YYYY-MM-DDZ, which is
    # invalid since it has a timezone but no time
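    # (e.g. '2011-08-20Z' is parsed as if it were '2011-08-20')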
    try:
        time_value = parse(time_string)
    except ValueError:
        if (len(time_string) == 11 and
                time_string.endswith('Z')):
            time_value = parse(time_string[:10])
        else:
            raise
return time_value
register_colormaps()
numpy_to_gdal_type = {
'uint8': 'Byte',
'int8': 'Byte',
'uint16': 'UInt16',
'int16': 'Int16',
'uint32': 'UInt32',
'int32': 'Int32',
'float32': 'Float32',
'float64': 'Float64',
'complex64': 'CFloat32',
'complex128': 'CFloat64'}
gdal_type_to_offset = {
'Byte': '1',
'UInt16': '2',
'Int16': '2',
'UInt32': '4',
'Int32': '4',
'Float32': '4',
'Float64': '8',
'CFloat32': '8',
'CFloat64': '16'}
| gpl-3.0 |
vinayak-mehta/scikit-learn | sklearn/model_selection/_split.py | 8 | 95127 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# Leandro Hermida <hermidal@cs.umd.edu>
# Rodion Martynov <marrodion@gmail.com>
# License: BSD 3 clause
from collections.abc import Iterable
from collections import defaultdict
import warnings
from itertools import chain, combinations
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
from inspect import signature
import numpy as np
from scipy.special import comb
from ..utils import indexable, check_random_state, _safe_indexing
from ..utils import _approximate_mode
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
__all__ = [
"BaseCrossValidator",
"KFold",
"GroupKFold",
"LeaveOneGroupOut",
"LeaveOneOut",
"LeavePGroupsOut",
"LeavePOut",
"RepeatedStratifiedKFold",
"RepeatedKFold",
"ShuffleSplit",
"GroupShuffleSplit",
"StratifiedKFold",
"StratifiedGroupKFold",
"StratifiedShuffleSplit",
"PredefinedSplit",
"train_test_split",
"check_cv",
]
class BaseCrossValidator(metaclass=ABCMeta):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <leave_one_out>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for i, (train_index, test_index) in enumerate(loo.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1]
Test: index=[0]
Fold 1:
Train: index=[0]
Test: index=[1]
See Also
--------
LeaveOneGroupOut : For splitting the data according to explicit,
domain-specific stratification of the dataset.
GroupKFold : K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
if n_samples <= 1:
raise ValueError(
"Cannot perform LeaveOneOut with n_samples={}.".format(n_samples)
)
return range(n_samples)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <leave_p_out>`.
Parameters
----------
p : int
Size of the test sets. Must be strictly less than the number of
samples.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for i, (train_index, test_index) in enumerate(lpo.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[2 3]
Test: index=[0 1]
Fold 1:
Train: index=[1 3]
Test: index=[0 2]
Fold 2:
Train: index=[1 2]
Test: index=[0 3]
Fold 3:
Train: index=[0 3]
Test: index=[1 2]
Fold 4:
Train: index=[0 2]
Test: index=[1 3]
Fold 5:
Train: index=[0 1]
Test: index=[2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
if n_samples <= self.p:
raise ValueError(
"p={} must be strictly less than the number of samples={}".format(
self.p, n_samples
)
)
for combination in combinations(range(n_samples), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, *, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError(
"The number of folds must be of Integral type. "
"%s of type %s was passed." % (n_splits, type(n_splits))
)
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits)
)
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False; got {0}".format(shuffle))
if not shuffle and random_state is not None: # None is the default
raise ValueError(
"Setting a random_state has no effect since shuffle is "
"False. You should leave "
"random_state to its default (None), or set shuffle=True.",
)
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
(
"Cannot have number of splits n_splits={0} greater"
" than the number of samples: n_samples={1}."
).format(self.n_splits, n_samples)
)
for train, test in super().split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle the data before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold. Otherwise, this
parameter has no effect.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf)
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(kf.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[2 3]
Test: index=[0 1]
Fold 1:
Train: index=[0 1]
Test: index=[2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
StratifiedKFold : Takes class information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold : K-fold iterator variant with non-overlapping groups.
RepeatedKFold : Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)
fold_sizes[: n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
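    # A small sketch of the fold-size computation above: with n_samples=10 and
    # n_splits=3, the first n_samples % n_splits folds get one extra sample.
    #
    #   >>> import numpy as np
    #   >>> fold_sizes = np.full(3, 10 // 3, dtype=int)
    #   >>> fold_sizes[: 10 % 3] += 1
    #   >>> fold_sizes
    #   array([4, 3, 3])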
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
Each group will appear exactly once in the test set across all folds (the
number of distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Read more in the :ref:`User Guide <group_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
Notes
-----
Groups appear in an arbitrary order throughout the folds.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> groups = np.array([0, 0, 2, 2, 3, 3])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3], group=[2 2]
Test: index=[0 1 4 5], group=[0 0 3 3]
Fold 1:
Train: index=[0 1 4 5], group=[0 0 3 3]
Test: index=[2 3], group=[2 2]
See Also
--------
LeaveOneGroupOut : For splitting the data according to explicit
domain-specific stratification of the dataset.
StratifiedKFold : Takes class information into account to avoid building
folds with imbalanced class proportions (for binary or multiclass
classification tasks).
"""
def __init__(self, n_splits=5):
super().__init__(n_splits, shuffle=False, random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError(
"Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d." % (self.n_splits, n_groups)
)
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
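    # A minimal sketch of the greedy balancing above, assuming group sizes
    # [5, 3, 2, 2] and n_splits=2: groups are visited largest first and each
    # is assigned to the currently lightest fold, so the assignments are
    # 5 -> fold 0, 3 -> fold 1, 2 -> fold 1, 2 -> fold 0 (np.argmin breaks
    # the tie toward fold 0), giving fold weights of 7 and 5 samples.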
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator.
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <stratified_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
shuffle : bool, default=False
Whether to shuffle each class's samples before splitting into batches.
Note that the samples within each split will not be shuffled.
random_state : int, RandomState instance or None, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold for each class.
Otherwise, leave `random_state` as `None`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf)
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(skf.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3]
Test: index=[0 2]
Fold 1:
Train: index=[0 2]
Test: index=[1 3]
Notes
-----
The implementation is designed to:
* Generate test sets such that all contain the same distribution of
classes, or as close as possible.
* Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
``y = [1, 0]`` should not change the indices generated.
* Preserve order dependencies in the dataset ordering, when
``shuffle=False``: all samples from class k in some test set were
contiguous in y, or separated in y by samples from classes other than k.
* Generate test sets where the smallest and largest differ by at most one
sample.
.. versionchanged:: 0.22
The previous implementation did not follow the last constraint.
See Also
--------
RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _make_test_folds(self, X, y=None):
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ("binary", "multiclass")
if type_of_target_y not in allowed_target_types:
raise ValueError(
"Supported target types are: {}. Got {!r} instead.".format(
allowed_target_types, type_of_target_y
)
)
y = column_or_1d(y)
_, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
# y_inv encodes y according to lexicographic order. We invert y_idx to
# map the classes so that they are encoded by order of appearance:
# 0 represents the first label appearing in y, 1 the second, etc.
_, class_perm = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError(
"n_splits=%d cannot be greater than the"
" number of members in each class." % (self.n_splits)
)
if self.n_splits > min_groups:
warnings.warn(
"The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (min_groups, self.n_splits),
UserWarning,
)
# Determine the optimal number of samples from each class in each fold,
        # using round robin over the sorted y. (This can be done directly
        # from counts, but that code is unreadable.)
y_order = np.sort(y_encoded)
allocation = np.asarray(
[
np.bincount(y_order[i :: self.n_splits], minlength=n_classes)
for i in range(self.n_splits)
]
)
# To maintain the data order dependencies as best as possible within
# the stratification constraint, we assign samples from each class in
# blocks (and then mess that up when shuffle=True).
test_folds = np.empty(len(y), dtype="i")
for k in range(n_classes):
# since the kth column of allocation stores the number of samples
# of class k in each test set, this generates blocks of fold
# indices corresponding to the allocation for class k.
folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
if self.shuffle:
rng.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
return test_folds
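    # A small sketch of the round-robin allocation above, assuming a sorted
    # target with four samples of class 0, three of class 1, and n_splits=3:
    #
    #   >>> import numpy as np
    #   >>> y_order = np.array([0, 0, 0, 0, 1, 1, 1])
    #   >>> [np.bincount(y_order[i::3], minlength=2) for i in range(3)]
    #   [array([2, 1]), array([1, 1]), array([1, 1])]
    #
    # Each row gives the number of samples per class allocated to one fold.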
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
class StratifiedGroupKFold(_BaseKFold):
"""Stratified K-Folds iterator variant with non-overlapping groups.
    This cross-validation object is a variation of StratifiedKFold that
    attempts to return stratified folds with non-overlapping groups. The folds
    are made by preserving the percentage of samples for each class.
Each group will appear exactly once in the test set across all folds (the
number of distinct groups has to be at least equal to the number of folds).
The difference between :class:`~sklearn.model_selection.GroupKFold`
and :class:`~sklearn.model_selection.StratifiedGroupKFold` is that
the former attempts to create balanced folds such that the number of
distinct groups is approximately the same in each fold, whereas
StratifiedGroupKFold attempts to create folds which preserve the
percentage of samples for each class as much as possible given the
constraint of non-overlapping groups between splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
shuffle : bool, default=False
Whether to shuffle each class's samples before splitting into batches.
Note that the samples within each split will not be shuffled.
This implementation can only shuffle groups that have approximately the
        same y distribution; no global shuffle will be performed.
random_state : int or RandomState instance, default=None
When `shuffle` is True, `random_state` affects the ordering of the
indices, which controls the randomness of each fold for each class.
Otherwise, leave `random_state` as `None`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedGroupKFold
>>> X = np.ones((17, 2))
>>> y = np.array([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
>>> groups = np.array([1, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 8, 8])
>>> sgkf = StratifiedGroupKFold(n_splits=3)
>>> sgkf.get_n_splits(X, y)
3
>>> print(sgkf)
StratifiedGroupKFold(n_splits=3, random_state=None, shuffle=False)
>>> for i, (train_index, test_index) in enumerate(sgkf.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" group={groups[train_index]}")
... print(f" Test: index={test_index}")
... print(f" group={groups[test_index]}")
Fold 0:
Train: index=[ 0 1 2 3 7 8 9 10 11 15 16]
group=[1 1 2 2 4 5 5 5 5 8 8]
Test: index=[ 4 5 6 12 13 14]
group=[3 3 3 6 6 7]
Fold 1:
Train: index=[ 4 5 6 7 8 9 10 11 12 13 14]
group=[3 3 3 4 5 5 5 5 6 6 7]
Test: index=[ 0 1 2 3 15 16]
group=[1 1 2 2 8 8]
Fold 2:
Train: index=[ 0 1 2 3 4 5 6 12 13 14 15 16]
group=[1 1 2 2 3 3 3 6 6 7 8 8]
Test: index=[ 7 8 9 10 11]
group=[4 5 5 5 5]
Notes
-----
The implementation is designed to:
* Mimic the behavior of StratifiedKFold as much as possible for trivial
groups (e.g. when each group contains only one sample).
* Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
``y = [1, 0]`` should not change the indices generated.
* Stratify based on samples as much as possible while keeping
non-overlapping groups constraint. That means that in some cases when
there is a small number of groups containing a large number of samples
the stratification will not be possible and the behavior will be close
to GroupKFold.
    See Also
    --------
    StratifiedKFold : Takes class information into account to build folds
        which retain class distributions (for binary or multiclass
        classification tasks).
    GroupKFold : K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=5, shuffle=False, random_state=None):
super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
def _iter_test_indices(self, X, y, groups):
# Implementation is based on this kaggle kernel:
# https://www.kaggle.com/jakubwasikowski/stratified-group-k-fold-cross-validation
        # and is subject to the Apache 2.0 License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Changelist:
# - Refactored function to a class following scikit-learn KFold
# interface.
# - Added heuristic for assigning group to the least populated fold in
# cases when all other criteria are equal
        # - Switch from using python ``Counter`` to ``np.unique`` to get class
# distribution
# - Added scikit-learn checks for input: checking that target is binary
# or multiclass, checking passed random state, checking that number
# of splits is less than number of members in each class, checking
# that least populated class has more members than there are splits.
rng = check_random_state(self.random_state)
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ("binary", "multiclass")
if type_of_target_y not in allowed_target_types:
raise ValueError(
"Supported target types are: {}. Got {!r} instead.".format(
allowed_target_types, type_of_target_y
)
)
y = column_or_1d(y)
_, y_inv, y_cnt = np.unique(y, return_inverse=True, return_counts=True)
if np.all(self.n_splits > y_cnt):
raise ValueError(
"n_splits=%d cannot be greater than the"
" number of members in each class." % (self.n_splits)
)
n_smallest_class = np.min(y_cnt)
if self.n_splits > n_smallest_class:
warnings.warn(
"The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (n_smallest_class, self.n_splits),
UserWarning,
)
n_classes = len(y_cnt)
_, groups_inv, groups_cnt = np.unique(
groups, return_inverse=True, return_counts=True
)
y_counts_per_group = np.zeros((len(groups_cnt), n_classes))
for class_idx, group_idx in zip(y_inv, groups_inv):
y_counts_per_group[group_idx, class_idx] += 1
y_counts_per_fold = np.zeros((self.n_splits, n_classes))
groups_per_fold = defaultdict(set)
if self.shuffle:
rng.shuffle(y_counts_per_group)
# Stable sort to keep shuffled order for groups with the same
# class distribution variance
sorted_groups_idx = np.argsort(
-np.std(y_counts_per_group, axis=1), kind="mergesort"
)
for group_idx in sorted_groups_idx:
group_y_counts = y_counts_per_group[group_idx]
best_fold = self._find_best_fold(
y_counts_per_fold=y_counts_per_fold,
y_cnt=y_cnt,
group_y_counts=group_y_counts,
)
y_counts_per_fold[best_fold] += group_y_counts
groups_per_fold[best_fold].add(group_idx)
for i in range(self.n_splits):
test_indices = [
idx
for idx, group_idx in enumerate(groups_inv)
if group_idx in groups_per_fold[i]
]
yield test_indices
def _find_best_fold(self, y_counts_per_fold, y_cnt, group_y_counts):
best_fold = None
min_eval = np.inf
min_samples_in_fold = np.inf
for i in range(self.n_splits):
y_counts_per_fold[i] += group_y_counts
# Summarise the distribution over classes in each proposed fold
std_per_class = np.std(y_counts_per_fold / y_cnt.reshape(1, -1), axis=0)
y_counts_per_fold[i] -= group_y_counts
fold_eval = np.mean(std_per_class)
samples_in_fold = np.sum(y_counts_per_fold[i])
is_current_fold_better = (
fold_eval < min_eval
or np.isclose(fold_eval, min_eval)
and samples_in_fold < min_samples_in_fold
)
if is_current_fold_better:
min_eval = fold_eval
min_samples_in_fold = samples_in_fold
best_fold = i
return best_fold
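    # A small sketch of the fold evaluation above: with per-fold class counts
    # [[2, 0], [0, 2]] and class totals [2, 2], each class has fold
    # proportions [1, 0] and [0, 1], so the per-class standard deviation
    # across folds is 0.5 and the candidate score (their mean) is 0.5; a
    # candidate assignment that evens out the proportions lowers this score.
    #
    #   >>> import numpy as np
    #   >>> counts = np.array([[2.0, 0.0], [0.0, 2.0]])
    #   >>> np.std(counts / np.array([2.0, 2.0]).reshape(1, -1), axis=0)
    #   array([0.5, 0.5])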
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, the test indices must be higher than the train indices;
    shuffling in this cross-validator is therefore inappropriate.
This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns the first k folds as the train set and the
    (k+1)th fold as the test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <time_series_split>`.
.. versionadded:: 0.18
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
.. versionchanged:: 0.22
``n_splits`` default value changed from 3 to 5.
max_train_size : int, default=None
Maximum size for a single training set.
test_size : int, default=None
Used to limit the size of the test set. Defaults to
``n_samples // (n_splits + 1)``, which is the maximum allowed value
with ``gap=0``.
.. versionadded:: 0.24
gap : int, default=0
Number of samples to exclude from the end of each train set before
the test set.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> tscv = TimeSeriesSplit()
>>> print(tscv)
TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None)
>>> for i, (train_index, test_index) in enumerate(tscv.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[0]
Test: index=[1]
Fold 1:
Train: index=[0 1]
Test: index=[2]
Fold 2:
Train: index=[0 1 2]
Test: index=[3]
Fold 3:
Train: index=[0 1 2 3]
Test: index=[4]
Fold 4:
Train: index=[0 1 2 3 4]
Test: index=[5]
>>> # Fix test_size to 2 with 12 samples
>>> X = np.random.randn(12, 2)
>>> y = np.random.randint(0, 2, 12)
>>> tscv = TimeSeriesSplit(n_splits=3, test_size=2)
>>> for i, (train_index, test_index) in enumerate(tscv.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[0 1 2 3 4 5]
Test: index=[6 7]
Fold 1:
Train: index=[0 1 2 3 4 5 6 7]
Test: index=[8 9]
Fold 2:
Train: index=[0 1 2 3 4 5 6 7 8 9]
Test: index=[10 11]
>>> # Add in a 2 period gap
>>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2)
>>> for i, (train_index, test_index) in enumerate(tscv.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[0 1 2 3]
Test: index=[6 7]
Fold 1:
Train: index=[0 1 2 3 4 5]
Test: index=[8 9]
Fold 2:
Train: index=[0 1 2 3 4 5 6 7]
Test: index=[10 11]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i`` th split,
with a test set of size ``n_samples//(n_splits + 1)`` by default,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=5, *, max_train_size=None, test_size=None, gap=0):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_size = max_train_size
self.test_size = test_size
self.gap = gap
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
gap = self.gap
test_size = (
self.test_size if self.test_size is not None else n_samples // n_folds
)
# Make sure we have enough samples for the given split parameters
if n_folds > n_samples:
raise ValueError(
f"Cannot have number of folds={n_folds} greater"
f" than the number of samples={n_samples}."
)
if n_samples - gap - (test_size * n_splits) <= 0:
raise ValueError(
f"Too many splits={n_splits} for number of samples"
f"={n_samples} with test_size={test_size} and gap={gap}."
)
indices = np.arange(n_samples)
test_starts = range(n_samples - n_splits * test_size, n_samples, test_size)
for test_start in test_starts:
train_end = test_start - gap
if self.max_train_size and self.max_train_size < train_end:
yield (
indices[train_end - self.max_train_size : train_end],
indices[test_start : test_start + test_size],
)
else:
yield (
indices[:train_end],
indices[test_start : test_start + test_size],
)
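    # A small sketch of the index arithmetic above: with n_samples=6,
    # n_splits=5, gap=0 and the default test_size=1, the test windows start at
    #
    #   >>> list(range(6 - 5 * 1, 6, 1))
    #   [1, 2, 3, 4, 5]
    #
    # which matches the five single-sample test sets in the docstring example.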
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data such that each training set is
comprised of all samples except ones belonging to one specific group.
    Arbitrary domain-specific group information is provided via an array of
    integers that encodes the group of each sample.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <leave_one_group_out>`.
Notes
-----
Splits are ordered according to the index of the group left out. The first
split has testing set consisting of the group whose index in `groups` is
lowest, and so on.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> logo.get_n_splits(groups=groups) # 'groups' is always required
2
>>> print(logo)
LeaveOneGroupOut()
>>> for i, (train_index, test_index) in enumerate(logo.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3], group=[2 2]
Test: index=[0 1], group=[1 1]
Fold 1:
Train: index=[0 1], group=[1 1]
Test: index=[2 3], group=[2 2]
    See Also
    --------
    GroupKFold : K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(
groups, input_name="groups", copy=True, ensure_2d=False, dtype=None
)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups
)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
return len(np.unique(groups))
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses samples
    all assigned to the same group.
Read more in the :ref:`User Guide <leave_p_groups_out>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> lpgo.get_n_splits(groups=groups) # 'groups' is always required
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for i, (train_index, test_index) in enumerate(lpgo.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2], group=[3]
Test: index=[0 1], group=[1 2]
Fold 1:
Train: index=[1], group=[2]
Test: index=[0 2], group=[1 3]
Fold 2:
Train: index=[0], group=[1]
Test: index=[1 2], group=[2 3]
See Also
--------
GroupKFold : K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(
groups, input_name="groups", copy=True, ensure_2d=False, dtype=None
)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1)
)
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=bool)
            for group in unique_groups[np.array(indices)]:
                test_index[groups == group] = True
yield test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
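    # For instance, with the three groups from the docstring example and
    # n_groups=2, the number of splits is the binomial coefficient C(3, 2):
    #
    #   >>> from scipy.special import comb
    #   >>> int(comb(3, 2, exact=True))
    #   3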
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super().split(X, y, groups)
class _RepeatedSplits(metaclass=ABCMeta):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Passes `random_state` to the arbitrary repeating cross validator.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, numbers.Integral):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 0:
raise ValueError("Number of repetitions must be greater than 0.")
if any(key in cvargs for key in ("random_state", "shuffle")):
raise ValueError("cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
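    # Note that the same RandomState instance `rng` is handed to every
    # repetition, so each repetition advances the generator and shuffles
    # differently, while the whole sequence of splits stays reproducible
    # for a fixed `random_state`.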
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
return cv.get_n_splits(X, y, groups) * self.n_repeats
def __repr__(self):
return _build_repr(self)
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <repeated_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Controls the randomness of each repeated cross-validation instance.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> rkf.get_n_splits(X, y)
4
>>> print(rkf)
RepeatedKFold(n_repeats=2, n_splits=2, random_state=2652124)
>>> for i, (train_index, test_index) in enumerate(rkf.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
...
Fold 0:
Train: index=[0 1]
Test: index=[2 3]
Fold 1:
Train: index=[2 3]
Test: index=[0 1]
Fold 2:
Train: index=[1 2]
Test: index=[0 3]
Fold 3:
Train: index=[0 3]
Test: index=[1 2]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
"""
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits
)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <repeated_k_fold>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, default=None
Controls the generation of the random states for each repetition.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> rskf.get_n_splits(X, y)
4
>>> print(rskf)
RepeatedStratifiedKFold(n_repeats=2, n_splits=2, random_state=36851234)
>>> for i, (train_index, test_index) in enumerate(rskf.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
...
Fold 0:
Train: index=[1 2]
Test: index=[0 3]
Fold 1:
Train: index=[0 3]
Test: index=[1 2]
Fold 2:
Train: index=[1 3]
Test: index=[0 2]
Fold 3:
Train: index=[0 2]
Test: index=[1 3]
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
See Also
--------
RepeatedKFold : Repeats K-Fold n times.
"""
def __init__(self, *, n_splits=5, n_repeats=10, random_state=None):
super().__init__(
StratifiedKFold,
n_repeats=n_repeats,
random_state=random_state,
n_splits=n_splits,
)
class BaseShuffleSplit(metaclass=ABCMeta):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self._default_test_size = 0.1
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <ShuffleSplit>`.
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
5
>>> print(rs)
ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None)
>>> for i, (train_index, test_index) in enumerate(rs.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3 0 4]
Test: index=[5 2]
Fold 1:
Train: index=[4 0 2 5]
Test: index=[1 3]
Fold 2:
Train: index=[1 2 4 0]
Test: index=[3 5]
Fold 3:
Train: index=[3 4 1 0]
Test: index=[5 2]
Fold 4:
Train: index=[3 5 1 0]
Test: index=[2 4]
>>> # Specify train and test size
>>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25,
... random_state=0)
>>> for i, (train_index, test_index) in enumerate(rs.split(X)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 3 0]
Test: index=[5 2]
Fold 1:
Train: index=[4 0 2]
Test: index=[1 3]
Fold 2:
Train: index=[1 2 4]
Test: index=[3 5]
Fold 3:
Train: index=[3 4 1]
Test: index=[5 2]
Fold 4:
Train: index=[3 5 1]
Test: index=[2 4]
"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.1
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(
n_samples,
self.test_size,
self.train_size,
default_test_size=self._default_test_size,
)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test : (n_test + n_train)]
yield ind_train, ind_test
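    # A small sketch of the partitioning above, assuming a permutation of five
    # samples with n_test=2 and n_train=3: the first n_test entries form the
    # test set and the next n_train entries form the train set.
    #
    #   >>> import numpy as np
    #   >>> permutation = np.array([3, 0, 4, 1, 2])  # one possible shuffle
    #   >>> permutation[:2], permutation[2 : 2 + 3]
    #   (array([3, 0]), array([4, 1, 2]))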
class GroupShuffleSplit(ShuffleSplit):
"""Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
    ``LeavePGroupsOut(n_groups=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
    Note: The parameters ``test_size`` and ``train_size`` refer to groups,
    not to samples as they do in ShuffleSplit.
Read more in the :ref:`User Guide <group_shuffle_split>`.
Parameters
----------
n_splits : int, default=5
Number of re-shuffling & splitting iterations.
    test_size : float or int, default=0.2
If float, should be between 0.0 and 1.0 and represent the proportion
of groups to include in the test split (rounded up). If int,
represents the absolute number of test groups. If None, the value is
set to the complement of the train size.
        The default of 0.2 applies only if ``train_size`` is unspecified;
        otherwise ``test_size`` complements the specified ``train_size``.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import GroupShuffleSplit
>>> X = np.ones(shape=(8, 2))
>>> y = np.ones(shape=(8, 1))
>>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3])
>>> print(groups.shape)
(8,)
>>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42)
>>> gss.get_n_splits()
2
>>> print(gss)
GroupShuffleSplit(n_splits=2, random_state=42, test_size=None, train_size=0.7)
>>> for i, (train_index, test_index) in enumerate(gss.split(X, y, groups)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}, group={groups[train_index]}")
... print(f" Test: index={test_index}, group={groups[test_index]}")
Fold 0:
Train: index=[2 3 4 5 6 7], group=[2 2 2 3 3 3]
Test: index=[0 1], group=[1 1]
Fold 1:
Train: index=[0 1 5 6 7], group=[1 1 3 3 3]
Test: index=[2 3 4], group=[2 2 2]
See Also
--------
ShuffleSplit : Shuffles samples to create independent test/train sets.
LeavePGroupsOut : Train set leaves out all possible subsets of `p` groups.
"""
def __init__(
self, n_splits=5, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.2
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super()._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
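    # A minimal sketch of the group-to-sample mapping above: the shuffle split
    # is drawn over the unique group labels, then inverted back to sample
    # indices with `np.in1d`.
    #
    #   >>> import numpy as np
    #   >>> groups = np.array([1, 1, 2, 2, 3])
    #   >>> classes, group_indices = np.unique(groups, return_inverse=True)
    #   >>> group_test = np.array([0])  # e.g. the group labelled 1 is held out
    #   >>> np.flatnonzero(np.in1d(group_indices, group_test))
    #   array([0, 1])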
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
return super().split(X, y, groups)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <stratified_shuffle_split>`.
Parameters
----------
n_splits : int, default=10
Number of re-shuffling & splitting iterations.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.1.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the training and testing indices produced.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 0, 1, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
5
>>> print(sss)
StratifiedShuffleSplit(n_splits=5, random_state=0, ...)
>>> for i, (train_index, test_index) in enumerate(sss.split(X, y)):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[5 2 3]
Test: index=[4 1 0]
Fold 1:
Train: index=[5 1 4]
Test: index=[0 2 3]
Fold 2:
Train: index=[5 0 2]
Test: index=[4 3 1]
Fold 3:
Train: index=[4 1 0]
Test: index=[2 3 5]
Fold 4:
Train: index=[0 5 1]
Test: index=[3 4 2]
"""
def __init__(
self, n_splits=10, *, test_size=None, train_size=None, random_state=None
):
super().__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state,
)
self._default_test_size = 0.1
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(
n_samples,
self.test_size,
self.train_size,
default_test_size=self._default_test_size,
)
if y.ndim == 2:
# for multi-label y, map each distinct row to a string repr
# using join because str(row) uses an ellipsis if len(row) > 1000
y = np.array([" ".join(row.astype("str")) for row in y])
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError(
"The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2."
)
if n_train < n_classes:
raise ValueError(
"The train_size = %d should be greater or "
"equal to the number of classes = %d" % (n_train, n_classes)
)
if n_test < n_classes:
raise ValueError(
"The test_size = %d should be greater or "
"equal to the number of classes = %d" % (n_test, n_classes)
)
# Find the sorted list of instances for each class:
# (np.unique above performs a sort, so code is O(n logn) already)
class_indices = np.split(
np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
)
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i in range(n_classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
train.extend(perm_indices_class_i[: n_i[i]])
test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
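    # Sketch of the per-class allocation above: `_approximate_mode` spreads
    # n_train draws over the classes roughly proportionally to class_counts
    # (breaking rounding ties at random), e.g. class counts [3, 3] with
    # n_train=4 yield 2 training samples per class; the test allocation is
    # then drawn from the remaining counts in the same way.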
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like of shape (n_samples,) or (n_samples, n_labels)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
Notes
-----
Randomized CV splitters may return different results for each call of
split. You can make the results identical by setting `random_state`
to an integer.
"""
y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
return super().split(X, y, groups)
def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None):
"""
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if test_size is None and train_size is None:
test_size = default_test_size
test_size_type = np.asarray(test_size).dtype.kind
train_size_type = np.asarray(train_size).dtype.kind
if (
test_size_type == "i"
and (test_size >= n_samples or test_size <= 0)
or test_size_type == "f"
and (test_size <= 0 or test_size >= 1)
):
raise ValueError(
"test_size={0} should be either positive and smaller"
" than the number of samples {1} or a float in the "
"(0, 1) range".format(test_size, n_samples)
)
if (
train_size_type == "i"
and (train_size >= n_samples or train_size <= 0)
or train_size_type == "f"
and (train_size <= 0 or train_size >= 1)
):
raise ValueError(
"train_size={0} should be either positive and smaller"
" than the number of samples {1} or a float in the "
"(0, 1) range".format(train_size, n_samples)
)
if train_size is not None and train_size_type not in ("i", "f"):
raise ValueError("Invalid value for train_size: {}".format(train_size))
if test_size is not None and test_size_type not in ("i", "f"):
raise ValueError("Invalid value for test_size: {}".format(test_size))
if train_size_type == "f" and test_size_type == "f" and train_size + test_size > 1:
raise ValueError(
"The sum of test_size and train_size = {}, should be in the (0, 1)"
" range. Reduce test_size and/or train_size.".format(train_size + test_size)
)
if test_size_type == "f":
n_test = ceil(test_size * n_samples)
elif test_size_type == "i":
n_test = float(test_size)
if train_size_type == "f":
n_train = floor(train_size * n_samples)
elif train_size_type == "i":
n_train = float(train_size)
if train_size is None:
n_train = n_samples - n_test
elif test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError(
"The sum of train_size and test_size = %d, "
"should be smaller than the number of "
"samples %d. Reduce test_size and/or "
"train_size." % (n_train + n_test, n_samples)
)
n_train, n_test = int(n_train), int(n_test)
if n_train == 0:
raise ValueError(
"With n_samples={}, test_size={} and train_size={}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters.".format(n_samples, test_size, train_size)
)
return n_train, n_test
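# A small worked example of the rounding above, assuming n_samples=10 and
# test_size=0.25 with train_size left as None: the test size is rounded up
# and the train size is the complement.
#
#   >>> from math import ceil
#   >>> n_test = ceil(0.25 * 10)
#   >>> n_train = 10 - n_test
#   >>> n_train, n_test
#   (7, 3)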
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Provides train/test indices to split data into train/test sets using a
predefined scheme specified by the user with the ``test_fold`` parameter.
Read more in the :ref:`User Guide <predefined_split>`.
.. versionadded:: 0.16
Parameters
----------
test_fold : array-like of shape (n_samples,)
The entry ``test_fold[i]`` represents the index of the test set that
sample ``i`` belongs to. It is possible to exclude sample ``i`` from
any test set (i.e. include sample ``i`` in every training set) by
setting ``test_fold[i]`` equal to -1.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps)
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for i, (train_index, test_index) in enumerate(ps.split()):
... print(f"Fold {i}:")
... print(f" Train: index={train_index}")
... print(f" Test: index={test_index}")
Fold 0:
Train: index=[1 2 3]
Test: index=[0]
Fold 1:
Train: index=[0 2]
Test: index=[1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=5, y=None, *, classifier=False):
"""Input checker utility for building a cross-validator.
Parameters
----------
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
cv = 5 if cv is None else cv
if isinstance(cv, numbers.Integral):
if (
classifier
and (y is not None)
and (type_of_target(y, input_name="y") in ("binary", "multiclass"))
):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, "split") or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError(
"Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv
)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
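# Illustrative sketch (not part of the original module): the same integer
# yields a different splitter depending on the task,
#
#     >>> isinstance(check_cv(5, y=[0, 1, 0, 1], classifier=True),
#     ...            StratifiedKFold)
#     True
#     >>> isinstance(check_cv(5, classifier=False), KFold)
#     True
#
# while an iterable of (train, test) index pairs is wrapped in
# _CVIterableWrapper so that it exposes split() and get_n_splits().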
def train_test_split(
*arrays,
test_size=None,
train_size=None,
random_state=None,
shuffle=True,
stratify=None,
):
"""Split arrays or matrices into random train and test subsets.
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` into a single call for splitting
(and optionally subsampling) data.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. If ``train_size`` is also None, it will
be set to 0.25.
train_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, default=None
Controls the shuffling applied to the data before applying the split.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=True
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like, default=None
If not None, data is split in a stratified fashion, using this as
the class labels.
Read more in the :ref:`User Guide <stratification>`.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(
n_samples, test_size, train_size, default_test_size=0.25
)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for shuffle=False"
)
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(
chain.from_iterable(
(_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays
)
)
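# Illustrative sketch (not part of the original module): with `stratify`,
# class proportions are preserved on both sides of the split,
#
#     >>> y = [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
#     >>> y_tr, y_te = train_test_split(y, test_size=0.5, stratify=y,
#     ...                               random_state=0)
#     >>> sorted(y_tr), sorted(y_te)
#     ([0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1])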
# Tell nose that train_test_split is not a test.
# (Needed for external libraries that may use nose.)
# Use setattr to avoid mypy errors when monkeypatching.
setattr(train_test_split, "__test__", False)
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params : dict
The dictionary to pretty print
offset : int, default=0
The offset in characters to add at the begin of each line.
printer : callable, default=repr
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ",\n" + (1 + offset // 2) * " "
for i, (k, v) in enumerate(sorted(params.items())):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = "%s=%s" % (k, str(v))
else:
# use repr of the rest
this_repr = "%s=%s" % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + "..." + this_repr[-100:]
if i > 0:
if this_line_length + len(this_repr) >= 75 or "\n" in this_repr:
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(", ")
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = "".join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = "\n".join(l.rstrip(" ") for l in lines.split("\n"))
return lines
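# Illustrative sketch (not part of the original module): parameters are
# sorted by name and joined on one line until roughly 75 characters, after
# which a newline plus indentation derived from `offset` is inserted:
#
#     >>> _pprint({'b': 2, 'a': 1})
#     'a=1, b=2'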
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted(
[
p.name
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
)
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", FutureWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if value is None and hasattr(self, "cvargs"):
value = self.cvargs.get(key, None)
if len(w) and w[0].category == FutureWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return "%s(%s)" % (class_name, _pprint(params, offset=len(class_name)))
def _yields_constant_splits(cv):
# Return True if calling cv.split() always returns the same splits
# We assume that if a cv doesn't have a shuffle parameter, it shuffles by
# default (e.g. ShuffleSplit). If it actually doesn't shuffle (e.g.
# LeaveOneOut), then it won't have a random_state parameter anyway, in
# which case it will default to 0, leading to output=True
shuffle = getattr(cv, "shuffle", True)
random_state = getattr(cv, "random_state", 0)
return isinstance(random_state, numbers.Integral) or not shuffle
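# Illustrative sketch (not part of the original module):
#
#     >>> _yields_constant_splits(KFold())  # no shuffling at all
#     True
#     >>> _yields_constant_splits(KFold(shuffle=True, random_state=0))
#     True
#     >>> _yields_constant_splits(ShuffleSplit(random_state=None))
#     False
#     >>> _yields_constant_splits(LeaveOneOut())  # no shuffle/random_state attrs
#     True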
| bsd-3-clause |
vinayak-mehta/scikit-learn | examples/svm/plot_svm_tie_breaking.py | 13 | 2165 | """
=========================================================
SVM Tie Breaking Example
=========================================================
Tie breaking is costly if ``decision_function_shape='ovr'``, and therefore it
is not enabled by default. This example illustrates the effect of the
``break_ties`` parameter for a multiclass classification problem and
``decision_function_shape='ovr'``.
The two plots differ only in the area in the middle where the classes are
tied. If ``break_ties=False``, all input in that area would be classified as
one class, whereas if ``break_ties=True``, the tie-breaking mechanism will
create a non-convex decision boundary in that area.
"""
# Code source: Andreas Mueller, Adrin Jalali
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=27)
fig, sub = plt.subplots(2, 1, figsize=(5, 8))
titles = ("break_ties = False", "break_ties = True")
for break_ties, title, ax in zip((False, True), titles, sub.flatten()):
svm = SVC(
kernel="linear", C=1, break_ties=break_ties, decision_function_shape="ovr"
).fit(X, y)
xlim = [X[:, 0].min(), X[:, 0].max()]
ylim = [X[:, 1].min(), X[:, 1].max()]
xs = np.linspace(xlim[0], xlim[1], 1000)
ys = np.linspace(ylim[0], ylim[1], 1000)
xx, yy = np.meshgrid(xs, ys)
pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])
colors = [plt.cm.Accent(i) for i in [0, 4, 7]]
points = ax.scatter(X[:, 0], X[:, 1], c=y, cmap="Accent")
classes = [(0, 1), (0, 2), (1, 2)]
line = np.linspace(X[:, 1].min() - 5, X[:, 1].max() + 5)
ax.imshow(
-pred.reshape(xx.shape),
cmap="Accent",
alpha=0.2,
extent=(xlim[0], xlim[1], ylim[1], ylim[0]),
)
for coef, intercept, col in zip(svm.coef_, svm.intercept_, classes):
line2 = -(line * coef[1] + intercept) / coef[0]
ax.plot(line2, line, "-", c=colors[col[0]])
ax.plot(line2, line, "--", c=colors[col[1]])
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_title(title)
ax.set_aspect("equal")
plt.show()
| bsd-3-clause |
procoder317/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 226 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries in a
Locality Sensitive Hashing Forest varies with the number of candidates and
the number of estimators (trees).
In the first plot, accuracy is plotted against the number of candidates.
Here, the term "number of candidates" refers to the maximum number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. The number of
estimators is held at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required; therefore,
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
thilbern/scikit-learn | examples/svm/plot_svm_anova.py | 249 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data to put ourselves in a curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to form a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute the cross-validation score using a single CPU (n_jobs=1)
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
fyffyt/scikit-learn | benchmarks/bench_sgd_regression.py | 281 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
infinit/grpc | tools/gcp/utils/big_query_utils.py | 42 | 5378 | #!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import uuid
import httplib2
from apiclient import discovery
from apiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
NUM_RETRIES = 3
def create_big_query():
"""Authenticates with cloud platform and gets a BiqQuery service object
"""
creds = GoogleCredentials.get_application_default()
return discovery.build('bigquery', 'v2', credentials=creds)
def create_dataset(big_query, project_id, dataset_id):
is_success = True
body = {
'datasetReference': {
'projectId': project_id,
'datasetId': dataset_id
}
}
try:
dataset_req = big_query.datasets().insert(projectId=project_id, body=body)
dataset_req.execute(num_retries=NUM_RETRIES)
except HttpError as http_error:
if http_error.resp.status == 409:
print 'Warning: The dataset %s already exists' % dataset_id
else:
# Note: For more debugging info, print "http_error.content"
print 'Error in creating dataset: %s. Err: %s' % (dataset_id, http_error)
is_success = False
return is_success
def create_table(big_query, project_id, dataset_id, table_id, table_schema,
description):
fields = [{'name': field_name,
'type': field_type,
'description': field_description
} for (field_name, field_type, field_description) in table_schema]
return create_table2(big_query, project_id, dataset_id, table_id,
fields, description)
def create_table2(big_query, project_id, dataset_id, table_id, fields_schema,
description):
is_success = True
body = {
'description': description,
'schema': {
'fields': fields_schema
},
'tableReference': {
'datasetId': dataset_id,
'projectId': project_id,
'tableId': table_id
}
}
try:
table_req = big_query.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=body)
res = table_req.execute(num_retries=NUM_RETRIES)
print 'Successfully created %s "%s"' % (res['kind'], res['id'])
except HttpError as http_error:
if http_error.resp.status == 409:
print 'Warning: Table %s already exists' % table_id
else:
print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
is_success = False
return is_success
def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
is_success = True
body = {'rows': rows_list}
try:
insert_req = big_query.tabledata().insertAll(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=body)
res = insert_req.execute(num_retries=NUM_RETRIES)
if res.get('insertErrors', None):
print 'Error inserting rows! Response: %s' % res
is_success = False
except HttpError as http_error:
print 'Error inserting rows to the table %s' % table_id
is_success = False
return is_success
def sync_query_job(big_query, project_id, query, timeout=5000):
query_data = {'query': query, 'timeoutMs': timeout}
query_job = None
try:
query_job = big_query.jobs().query(
projectId=project_id,
body=query_data).execute(num_retries=NUM_RETRIES)
except HttpError as http_error:
print 'Query execute job failed with error: %s' % http_error
print http_error.content
return query_job
# List of (column name, column type, description) tuples
def make_row(unique_row_id, row_values_dict):
"""row_values_dict is a dictionary of column name and column value.
"""
return {'insertId': unique_row_id, 'json': row_values_dict}
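# Illustrative usage sketch (not part of the original module; the column
# names below are made up). The insertId lets BigQuery deduplicate rows if
# an insert is retried:
#
#   row = make_row(str(uuid.uuid4()), {'test_name': 'qps', 'qps': 42})
#   # -> {'insertId': '<uuid>', 'json': {'test_name': 'qps', 'qps': 42}}
#   # insert_rows(big_query, 'my-project', 'my_dataset', 'my_table', [row])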
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/neighbors/approximate.py | 127 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
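# Illustrative worked example (not part of the original module), using 4-bit
# hashes for readability. To find entries sharing the top-2 bits of the
# query 0b0110, the masks zero out / saturate the trailing bits:
#
#     tree  = np.array([0b0010, 0b0100, 0b0101, 0b1011])  # sorted hashes
#     query = np.array([0b0110])
#     left, right = _find_matching_indices(tree, query,
#                                          left_mask=0b1100,   # keep prefix
#                                          right_mask=0b0011)  # fill suffix
#     # left == [1], right == [3]: tree[1:3] = [0b0100, 0b0101] are exactly
#     # the entries sharing the prefix '01' with the query.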
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# If there are not enough candidates, the remainder is filled
# uniformly from the unselected indices.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components` is the hash size and `n_features` is the data dimension.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
google-research/proteinfer | colab_evaluation.py | 1 | 11557 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version of evaluation utilities intended for use in lower memory environments, such as colab.
This version of the evaluation code leverages the fact that the vast majority of example-label predictions
are essentially zero, and so only contribute to false negatives. It therefore represents the data in "tidy
format" with one row per example-label pair and excludes example-label pairs below a defined threshold.
"""
import numpy as np
import pandas as pd
import sklearn.metrics
import tqdm
import inference
import utils
import evaluation
def read_blast_table(filename):
"""Read a table of BLAST results."""
blast_out = pd.read_table(filename,
names=[
'up_id', 'target', 'pc_identity',
'pc_positives', 'alignment_length',
'mismatches', 'gap_opens', 'q. start',
'q. end', 's. start', 'evalue', 'bit_score'
])
def extract_accession(long_string):
"""Strip out accession decoration from a parameter"""
return long_string.replace('accession="', '').replace('"', '')
blast_out['up_id'] = blast_out['up_id'].map(extract_accession)
blast_out['target'] = blast_out['target'].map(extract_accession)
blast_out = blast_out[[
'up_id', 'target', 'pc_identity', 'alignment_length', 'bit_score'
]]
return blast_out
def stats_by_group(df):
"""Calculate statistics from a groupby'ed dataframe with TPs,FPs and FNs."""
EPSILON = 1e-10
result = df[['tp', 'fp', 'fn']].sum().reset_index().assign(
precision=lambda x: (x['tp'] + EPSILON) /
(x['tp'] + x['fp'] + EPSILON),
recall=lambda x: (x['tp'] + EPSILON) /
(x['tp'] + x['fn'] + EPSILON)).assign(
f1=lambda x: 2 * x['precision'] * x['recall'] /
(x['precision'] + x['recall'] + EPSILON),
count=lambda x: x['tp'] + x['fn'])
result['proportion'] = result['count'] / np.sum(result['count'])
result['proportion_text'] = (result['proportion'] *
100).round(2).astype(str) + "%"
return result
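# Illustrative sketch (not part of the original module): for a single group
# with tp=3, fp=1, fn=1 the smoothed metrics come out as
# precision = (3 + eps) / (4 + eps) ~= 0.75, recall ~= 0.75, f1 ~= 0.75,
# count = tp + fn = 4; EPSILON only matters when a group has no positives.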
def get_stats(df):
"""Calculate statistics from a dataframe with TPs,FPs and FNs."""
df['dummy_group'] = 'all'
data = stats_by_group(df.groupby('dummy_group')).drop(
columns=['dummy_group', 'proportion', 'proportion_text'])
return data
def apply_threshold_and_return_stats(predictions_df,
ground_truth_df,
threshold=0.5,
grouping=None):
"""Given predictions, ground truth and a threshold get statistics."""
calls = assign_tp_fp_fn(predictions_df, ground_truth_df, threshold)
if grouping:
calls['group'] = calls['label'].map(grouping)
return stats_by_group(
calls.groupby("group")).assign(threshold=threshold)
else:
return get_stats(calls).assign(threshold=threshold)
def batch_inferences(iterator, batch_size):
"""Yield batches of seq_ids and predictions matrices from an iterator."""
counter = 0
predictions = []
seq_ids = []
while True:
try:
inference = next(iterator)
except StopIteration:
if len(seq_ids) > 0:
yield seq_ids, np.vstack(predictions)
return
seq_ids.append(inference[0])
predictions.append(inference[1])
counter += 1
if counter == batch_size:
yield seq_ids, np.vstack(predictions)
predictions = []
seq_ids = []
counter = 0
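# Illustrative sketch (not part of the original module): five (seq_id,
# prediction-row) pairs with batch_size=2 come out as batches of 2, 2 and 1:
#
#     fake = iter([('seq%d' % i, np.zeros((1, 3))) for i in range(5)])
#     sizes = [preds.shape[0] for _, preds in batch_inferences(fake, 2)]
#     # sizes == [2, 2, 1]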
def batched_inferences_from_files(shard_names, batch_size=100):
"""Iterate through TFRecord files of inferences and yield batches."""
for file_name in tqdm.tqdm(shard_names, position=0):
inference_iterator = inference.parse_shard(file_name)
batched_iterator = batch_inferences(inference_iterator, batch_size)
while True:
try:
yield next(batched_iterator)
except StopIteration:
break
def batched_inferences_from_dir(shard_dir_path, batch_size=100):
"""Iterate through directory of inference TFRecord files and yield batches."""
files_to_process = utils.absolute_paths_of_files_in_dir(shard_dir_path)
return batched_inferences_from_files(files_to_process, batch_size)
def _make_tidy_df_from_seq_names_and_prediction_array(
sequence_names, predictions_array, vocab,
min_decision_threshold=1e-20):
"""Given a list of sequences and a matrix of prediction values, yield a tidy dataframe of predictions."""
up_ids = []
labels = []
values = []
for i in range(len(sequence_names)):
up_id = sequence_names[i]
preds = predictions_array[i, :]
for vocab_index in np.argwhere(preds > min_decision_threshold):
vocab_index = vocab_index[0]
up_ids.append(up_id)
labels.append(vocab[vocab_index])
values.append(preds[vocab_index])
return pd.DataFrame({"up_id": up_ids, "label": labels, "value": values})
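# Illustrative sketch (not part of the original module; the accession and
# vocab names are made up): a prediction of exactly zero is dropped because
# it cannot exceed min_decision_threshold:
#
#     df = _make_tidy_df_from_seq_names_and_prediction_array(
#         ['P12345'], np.array([[0.9, 0.0, 0.2]]), ['GO:1', 'GO:2', 'GO:3'])
#     # df has two rows: ('P12345', 'GO:1', 0.9) and ('P12345', 'GO:3', 0.2)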
def get_normalized_inference_results(shard_dir_path,
vocab,
label_normalizer,
min_decision_threshold=1e-20):
"""Take a directory of sharded inferences and output a tidy and normalized dataframe.
Inferences are in the format defined in inference.py
Args:
shard_dir_path: directory of TFrecord inference shards
vocab: a list of vocabulary items
label_normalizer: a dictionary mapping vocabulary items to their parents
min_decision_threshold: a threshold reflecting the minimum we will ever be
able to use to call a positive in subsequent analysis. Higher
values use less RAM at the expense of lower maximum sensitivity.
Returns:
A pandas dataframe with one row per example-label (provided value > min_decision_threshold) and the
associated value from the neural network.
"""
batches = batched_inferences_from_dir(shard_dir_path)
dfs = []
for seq_names, confidences in batches:
normed_confidences = evaluation.normalize_confidences(
confidences, vocab, label_normalizer)
dfs.append(
_make_tidy_df_from_seq_names_and_prediction_array(
seq_names,
normed_confidences,
vocab,
min_decision_threshold=min_decision_threshold))
return pd.concat(dfs)
def make_tidy_df_from_ground_truth(ground_truth):
"""Create a tidy dataframe from ground truth data."""
up_ids = []
labels = []
for i in tqdm.tqdm(ground_truth.index, position=0):
up_id = ground_truth['sequence_name'][i]
for vocab_entry in ground_truth['true_label'][i]:
up_ids.append(up_id)
labels.append(vocab_entry)
return pd.DataFrame({"up_id": up_ids, "label": labels, "gt": True})
def merge_predictions_and_ground_truth(predictions_df, ground_truth_df):
"""Perform an outer join of predictions and ground truth, then set all empty values to False."""
combined = predictions_df.merge(ground_truth_df,
how="outer",
suffixes=("_pred", "_gt"),
left_on=["label", "up_id"],
right_on=["label", "up_id"])
combined = combined.fillna(False)
return combined
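# Illustrative sketch (not part of the original module; the ids are made
# up): after the outer join, fillna(False) gives missing entries falsy
# placeholders, so both kinds of misses survive the merge:
#
#     preds = pd.DataFrame({'up_id': ['A'], 'label': ['GO:1'], 'value': [0.9]})
#     gt = pd.DataFrame({'up_id': ['A', 'B'], 'label': ['GO:1', 'GO:1'],
#                        'gt': [True, True]})
#     merged = merge_predictions_and_ground_truth(preds, gt)
#     # The ('B', 'GO:1') row keeps gt=True but gets value=False (~0), so it
#     # can only ever be counted as a false negative downstream.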
def get_pr_curve_df(predictions_df,
ground_truth_df,
grouping=None,
filtered=True):
"""Given predictions and ground truth in tidy format, yield a precision recall curve.
Args:
predictions_df: predictions in tidy format
ground_truth_df: ground truth in tidy format
grouping: optional dictionary mapping sequence names to categories
filtered: whether to remove almost redundant points on PR curve
"""
combined = merge_predictions_and_ground_truth(predictions_df,
ground_truth_df)
if grouping is None:
to_process = {'all': combined}.items()
else:
combined['group'] = combined['label'].map(grouping)
to_process = combined.groupby('group')
del combined
output_dfs = []
for group_name, group in tqdm.tqdm(to_process, position=0):
precisions, recalls, thresholds = sklearn.metrics.precision_recall_curve(
group['gt'], group['value'])
precisions = precisions[:-1]
recalls = recalls[:-1]
if filtered:
precisions, recalls, thresholds = filter_pr_curve(
precisions, recalls, thresholds)
output_dfs.append(
pd.DataFrame({
'group':
group_name,
'precision':
precisions,
'recall':
recalls,
'threshold':
thresholds,
'f1':
2 * precisions * recalls / (precisions + recalls)
}))
return pd.concat(output_dfs)
def filter_pr_curve(precisions, recalls, thresholds, resolution=1e-4):
"""Filters out imperceptible shifts in a PR curve."""
last_precision = None
last_recall = None
new_precisions = []
new_recalls = []
new_thresholds = []
for i in range(len(precisions)):
if last_precision is None or abs(recalls[i] -
last_recall) >= resolution:
new_precisions.append(precisions[i])
last_precision = precisions[i]
new_recalls.append(recalls[i])
last_recall = recalls[i]
new_thresholds.append(thresholds[i])
return np.array(new_precisions), np.array(new_recalls), np.array(
new_thresholds)
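# Illustrative sketch (not part of the original module): points whose recall
# moves by less than `resolution` from the last kept point are dropped:
#
#     p, r, t = filter_pr_curve(np.array([1.0, 0.9, 0.8]),
#                               np.array([0.5, 0.5, 0.4]),
#                               np.array([0.9, 0.8, 0.7]))
#     # p == [1.0, 0.8], r == [0.5, 0.4], t == [0.9, 0.7]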
def assign_tp_fp_fn(predictions_df, ground_truth_df, threshold):
"""Return a new predictions dataframe where each row is assigned as either a TP, FP or FN."""
combined = merge_predictions_and_ground_truth(predictions_df,
ground_truth_df)
predicted_positive = combined['value'] > threshold
combined['tp'] = (combined['gt'] == True) & predicted_positive
combined['fp'] = (combined['gt'] == False) & predicted_positive
# Count ground-truth positives at or below the threshold as false negatives,
# so that values sitting exactly at the threshold are not silently dropped.
combined['fn'] = (combined['gt'] == True) & ~predicted_positive
return combined | apache-2.0 |
yvlasov/ConProbIN | try-ml/try-regration-v01.py | 1 | 2585 | #!/usr/bin/python
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
###############################################################################
# Generate sample data
import pandas
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
#url = "MotionSymulationForNNet.csv"
url = "20000-samples.csv"
names = ['ShortCurve', 'ShortCurveDer', 'LongTail', 'NoizeSignal', 'SignalWave']
#names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)
url = "MotionSymulationForNNet-control.csv"
names = ['ShortCurve', 'ShortCurveDer', 'LongTail', 'NoizeSignal', 'SignalWave']
#names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset_control = pandas.read_csv(url, names=names)
T = np.linspace(0, 1, 1001)[:, np.newaxis]
# Split-out validation dataset
array = dataset.values
#print array
X = array[:,0:4]
y = array[:,4]
array = dataset_control.values
#print array
X_control = array[:,0:4]
y_control = array[:,4]
#print X
#print Y
#validation_size = 0.20
#seed = 7
#X_train, X_validation, Y_train, Y_validation = cross_validation.train_test_split(X, Y, test_size=validation_size, random_state=seed)
###############################################################################
# Fit regression model
n_neighbors = 20000
print(len(T))
print(len(X))
print(len(y))
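# Hedged aside (not in the original script): with k equal to the whole
# training set, 'uniform' weights collapse every prediction to the global mean
# of y, while 'distance' weights still vary with the query point. A quick
# sanity check of the fit could be:
# knn = neighbors.KNeighborsRegressor(n_neighbors, weights='distance').fit(X, y)
# print(knn.score(X_control, y_control))  # R^2 on the control set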
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(X_control)
plt.subplot(2, 1, i + 1)
# plt.scatter(T, y_control, c='r', label='data')
#'ShortCurveDer', 'LongTail', 'NoizeSignal', 'SignalWave']
# plt.plot(T, X_control[:,0], c='k', label='ShortCurve')
# plt.plot(T, X_control[:,1], c='grey', label='ShortCurveDer')
# plt.plot(T, X_control[:,2], c='b', label='LongTail')
plt.plot(T, X_control[:,3], c='y', label='NoizeSignal')
plt.plot(T, y_, c='g', label='prediction')
plt.plot(T, (y_-y_control), c='m', label='Prediction Error')
plt.plot(T, (y_-X_control[:,0]), c='c', label='Shortcurve Error')
plt.plot(T, y_control, c='r', label='Control Out')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| mit |
ZenDevelopmentSystems/scikit-learn | benchmarks/bench_glm.py | 295 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
    # keep the x-axis consistent with the problem sizes benchmarked below
    dimensions = 10 * np.arange(n_iter) + 3
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
amueller/pystruct | examples/plot_latent_node.py | 2 | 3205 | """
=================================
Latent Variable Hierarchical CRF
=================================
Solving a 2d grid toy problem by introducing an additional layer of latent
variables.
"""
import numpy as np
import itertools
from pystruct.models import GraphCRF, LatentNodeCRF
from pystruct.learners import NSlackSSVM, OneSlackSSVM, LatentSSVM
from pystruct.datasets import make_simple_2x2
from pystruct.utils import make_grid_edges, plot_grid
import matplotlib.pyplot as plt
def plot_boxes(boxes, size=4, title=""):
cmap = plt.cm.gray
if boxes[0].size == size * size:
fig, ax = plt.subplots(1, len(boxes), figsize=(8, 0.7))
for a, x in zip(ax, boxes):
plot_grid(x[:size * size].reshape(size, size), cmap=cmap, axes=a,
border_color="green")
else:
# have hidden states
fig, ax = plt.subplots(2, len(boxes), figsize=(8, 1))
for a, x in zip(ax[0], boxes):
            plot_grid(x[size * size:].reshape(size // 2, size // 2), cmap=cmap,
axes=a, border_color="green")
for a, x in zip(ax[1], boxes):
plot_grid(x[:size * size].reshape(size, size), cmap=cmap, axes=a,
border_color="green")
fig.subplots_adjust(.01, .03, .98, .75, .2, .05)
fig.suptitle(title)
# learn the "easy" 2x2 boxes dataset.
# a 2x2 box is placed randomly in a 4x4 grid
# we add a latent variable for each 2x2 patch
# that should make the model fairly simple
X, Y = make_simple_2x2(seed=1)
# flatten X and Y
X_flat = [x.reshape(-1, 1).astype(float) for x in X]
Y_flat = [y.ravel() for y in Y]
# first, use standard graph CRF. Can't do much, high loss.
crf = GraphCRF()
svm = NSlackSSVM(model=crf, max_iter=200, C=1, n_jobs=1)
G = [make_grid_edges(x) for x in X]
X_grid_edges = list(zip(X_flat, G))
svm.fit(X_grid_edges, Y_flat)
plot_boxes(svm.predict(X_grid_edges), title="Non-latent SSVM predictions")
print("Training score binary grid CRF: %f" % svm.score(X_grid_edges, Y_flat))
# using one latent variable for each 2x2 rectangle
latent_crf = LatentNodeCRF(n_labels=2, n_features=1, n_hidden_states=2,
inference_method='lp')
ssvm = OneSlackSSVM(model=latent_crf, max_iter=200, C=100,
n_jobs=-1, show_loss_every=10, inference_cache=50)
latent_svm = LatentSSVM(ssvm)
# make edges for hidden states:
edges = []
node_indices = np.arange(4 * 4).reshape(4, 4)
for i, (x, y) in enumerate(itertools.product([0, 2], repeat=2)):
for j in range(x, x + 2):
for k in range(y, y + 2):
edges.append([i + 4 * 4, node_indices[j, k]])
G = [np.vstack([make_grid_edges(x), edges]) for x in X]
# Random initialization
H_init = [np.hstack([y.ravel(), np.random.randint(2, 4, size=2 * 2)])
for y in Y]
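# Note: states 0 and 1 are the visible labels, so the 2 * 2 hidden nodes are
# initialised with random states drawn from {2, 3}.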
plot_boxes(H_init, title="Top: Random initial hidden states. Bottom: Ground"
"truth labeling.")
X_ = list(zip(X_flat, G, [2 * 2 for x in X_flat]))
latent_svm.fit(X_, Y_flat, H_init)
print("Training score with latent nodes: %f " % latent_svm.score(X_, Y_flat))
H = latent_svm.predict_latent(X_)
plot_boxes(H, title="Top: Hidden states after training. Bottom: Prediction.")
plt.show()
| bsd-2-clause |
Succeed-Together/bakfu | process/tagging/tag_treetagger.py | 2 | 4160 | # -*- coding: utf-8 -*-
'''
tag_treetagger.py
This module is a wrapper to TreeTagger.
'''
import os
import sys
import string
import sklearn
from ...core.routes import register
from bakfu.process.base import BaseProcessor
DELIMITER = "<eol>"
DELIMITER_NL = "\n"+DELIMITER+"\n"
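# The input documents are joined with this sentinel so that one TreeTagger
# call can tag the whole batch; the tagged stream is split back into
# per-document lists when a DELIMITER token is encountered in run() below.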
#TREETAGGER_HOME
__errors__ = []
try:
import treetaggerwrapper
#import treetagger
except Exception:
e = sys.exc_info()
__errors__.append(e)
@register('tagging.treetagger', __errors__)
class TreeTagger(BaseProcessor):
'''
Pre-processes data with treetagger.
:param: treetagger_home: Path to treetagger installation.
env var TREETAGGER_HOME will also be used.
default paths will be used otherwise.
:Example:
>>>from bakfu.examples.dataset1 import DATA
>>>import nltk
>>>baf = bakfu.Chain(lang="en")
>>>baf.load("data.simple",DATA)
>>>baf.process('tagging.treetagger')
>>>baf.process('vectorize.sklearn',
... min_df = 2,
... ngram_range=(1, 3),
... #stop_words=nltk.corpus.stopwords.words(baf.get('language')),
... max_features=100,
... tokenizer=lambda x:x,
    ...         preprocessor=lambda x:x,
    ...         )
>>>print(baf.get_chain("vectorizer").get_feature_names())
>>>print(baf.get_chain("vectorizer_result").toarray()[0])
'''
init_args = ()
init_kwargs = ('tagdir',)
run_args = ()
run_kwargs = ()
def __init__(self, *args, **kwargs):
super(TreeTagger, self).__init__(*args, **kwargs)
        if 'tagdir' in kwargs:
            self.TREETAGGER_HOME = kwargs['tagdir']
        else:
            self.TREETAGGER_HOME = os.environ.get('TREETAGGER_HOME', '')
def run(self, caller, *args, **kwargs):
'''
TODO:CLEAN UP
'''
super(TreeTagger, self).run(caller, *args, **kwargs)
data_source = caller.get_chain('data_source')
self.caller=caller
cur_data = data_source.get_data()
#run treetagger
text = DELIMITER_NL.join(cur_data)
#tagger = treetagger.TreeTagger(
#encoding='utf8',
#path_to_home=self.TREETAGGER_HOME+'/cmd/tree-tagger-english-utf8',
#language=caller.get('language'))
#tags = tagger.tag(text)
tagger = treetaggerwrapper.TreeTagger(
TAGLANG=caller.get('lang'),
#TREETAGGER_HOME=self.TREETAGGER_HOME,
TAGDIR=self.TREETAGGER_HOME,
TAGINENC='utf-8',TAGOUTENC='utf-8')
tags = tagger.TagText(text)
#process treetagger output
tagged_data = []
buffer = []
for tag in tags:
tag = tag.split("\t")
if tag[0]==DELIMITER:
buffer = [ (a, b, c if c != '<unknown>' else a)
for a, b, c in buffer]
tagged_data.append(buffer)
buffer = []
else:
buffer.append(tag)
tagged_data.append(buffer)
result = tagged_data
caller.data['result'] = result
        # Remove tokens whose POS tag is in the filtered set
        FILTER_TAGS = ('SENT', 'KON', 'PUN', 'DT')
        data_clean = [[tok for tok in line if tok[1] not in FILTER_TAGS]
                      for line in tagged_data]
        # Drop tokens whose canonical form is too short to be informative
        data_clean = [[tok for tok in line if len(tok[2]) > 2]
                      for line in data_clean]
#remove tags ; only keep cannonical form
data_clean = [[d[2] for d in line] for line in data_clean]
#reformat data to ((id,data),...)
#note: data now contains lists of tokens instead of sentences
uids = data_source.get_uids()
new_data = zip(uids, data_clean)
#Assign processed data to a new data source
new_data_source = self.caller.load_unchained("data.simple", new_data)
new_data_source.meta_data = {"tokenized":True}
self._data.update(
{'result':result,
'tagger_result':result,
'data_source':new_data_source,
})
return self
| bsd-3-clause |
mpld3/mpld3_rewrite | test_renderer.py | 1 | 3397 | import numpy as np
import matplotlib.pyplot as plt
from mpld3_rewrite import fig_to_html
D3_URL = 'js/d3.v3.min.js'
MPLD3_URL = 'js/mpld3.v0.1.js'
def test1(filename):
x = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.grid(True)
ax.plot(x, np.sin(x), '-ob', alpha=0.5)
ax.plot([0.3, 0.5, 0.7], [0.5, 0.8, 0.5], '-ok', lw=2,
transform=ax.transAxes)
ax.plot(x, np.cos(x), '-^r', alpha=0.5)
ax.text(5, 0, "blue moving", fontsize=18, color="blue")
ax.text(0.5, 0.4, "red stationary", fontsize=18, color="red",
transform=ax.transAxes)
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.add_patch(plt.Circle((5, 0), 0.3, ec='k', fc='g', alpha=0.2))
ax.add_patch(plt.Circle((0.3, 0.3), 0.1, ec='k', fc='y',
transform=ax.transAxes,
alpha=0.2))
print("writing to {0}".format(filename))
    with open(filename, 'w') as fout:
        fout.write(fig_to_html(fig, d3_url=D3_URL, mpld3_url=MPLD3_URL))
def test2(filename):
np.random.seed(0)
x, y = np.random.normal(0, 1, (2, 100))
fig, ax = plt.subplots()
ax.grid(True)
ax.scatter(x, y, c=np.random.random(x.shape),
s=100 + 300 * np.random.random(100),
alpha=0.3)
print("writing to {0}".format(filename))
    with open(filename, 'w') as fout:
        fout.write(fig_to_html(fig, d3_url=D3_URL, mpld3_url=MPLD3_URL))
def test3(filename):
fig, ax = plt.subplots()
x = np.linspace(-2, 2, 20)
y = x[:, None]
X = np.zeros((20, 20, 4))
X[:, :, 0] = np.exp(- (x - 1) ** 2 - (y) ** 2)
X[:, :, 1] = np.exp(- (x + 0.71) ** 2 - (y - 0.71) ** 2)
X[:, :, 2] = np.exp(- (x + 0.71) ** 2 - (y + 0.71) ** 2)
X[:, :, 3] = np.exp(-0.25 * (x ** 2 + y ** 2))
im = ax.imshow(X, extent=(10, 20, 10, 20),
origin='lower', zorder=1, interpolation='nearest')
fig.colorbar(im, ax=ax)
ax.text(16, 16, "overlaid text")
ax.text(16, 15, "covered text", zorder=0)
ax.set_title('An Image', size=20)
ax.set_xlim(9, 21)
ax.set_ylim(9, 21)
print("writing to {0}".format(filename))
    with open(filename, 'w') as fout:
        fout.write(fig_to_html(fig, d3_url=D3_URL, mpld3_url=MPLD3_URL))
def test4(filename):
from sklearn.datasets import load_iris
data = load_iris()
X = data.data
y = data.target
# dither the data for clearer plotting
X += 0.1 * np.random.random(X.shape)
fig, ax = plt.subplots(4, 4, sharex="col", sharey="row", figsize=(8, 8))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
hspace=0.1, wspace=0.1)
for i in range(4):
for j in range(4):
ax[3 - i, j].scatter(X[:, j], X[:, i],
c=y, s=40, alpha=0.3)
# remove tick labels
for axi in ax.flat:
for axis in [axi.xaxis, axi.yaxis]:
axis.set_major_formatter(plt.NullFormatter())
print("writing to {0}".format(filename))
    with open(filename, 'w') as fout:
        fout.write(fig_to_html(fig, d3_url=D3_URL, mpld3_url=MPLD3_URL))
if __name__ == '__main__':
test1("renderer_test-1.html")
test2("renderer_test-2.html")
test3("renderer_test-3.html")
test4("renderer_test-4.html")
| bsd-3-clause |
rasata/pypes | ui/pypesvds/lib/PypesInterface.py | 4 | 12978 | from pypes.pipeline import Dataflow
import pkg_resources
import logging
import os
import json
import traceback
from pylons import config
log = logging.getLogger(__name__)
ENTRYPOINT = 'pypesvds.plugins'
PLUGIN_DIR = os.path.join(os.path.dirname(__file__), '../plugins')
def init_plugins():
log.info('Initializing Studio Plugins from %s' % config['plugin_dir'])
try:
if not os.path.exists(config['plugin_dir']):
os.mkdir(config['plugin_dir'])
except:
        log.error('Unable to create plugins directory: %s' % config['plugin_dir'])
eggs, errors = pkg_resources.working_set.find_plugins(pkg_resources.Environment([PLUGIN_DIR, config['plugin_dir']]))
map(pkg_resources.working_set.add, eggs)
plugins = {}
for egg in eggs:
egg.activate()
for name in egg.get_entry_map(ENTRYPOINT):
entry_point = egg.get_entry_info(ENTRYPOINT, name)
cls = entry_point.load()
if not hasattr(cls, '__metatype__'):
cls.__metatype__ = ''
if cls.__metatype__ in plugins:
d = plugins[cls.__metatype__]
d[cls.__name__] = cls
plugins[cls.__metatype__] = d
else:
d = {}
d[cls.__name__] = cls
plugins[cls.__metatype__] = d
return plugins
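# Hedged illustration (not part of this module): a plugin egg advertises its
# component class on the 'pypesvds.plugins' entry point, e.g. in its setup.py:
#
#     setup(
#         name='MyFilter',
#         entry_points={'pypesvds.plugins': ['MyFilter = myfilter:MyFilter']},
#     )
#
# The loader above then buckets each class by its __metatype__ attribute
# ('FILTER', 'TRANSFORMER', 'OPERATOR', 'EXTRACTOR', 'ADAPTER', 'PUBLISHER').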
class DataFlowGraph(object):
def __init__(self):
self._workflow = None
self._config = None
self._graph = None
# load plugins
self.plugins = init_plugins()
self.plugin_registry = {}
# load each type here...
try:
self._filters = self.plugins['FILTER'].keys()
except:
self._filters = []
else:
self.plugin_registry.update(self.plugins['FILTER'])
try:
self._transformers = self.plugins['TRANSFORMER'].keys()
except:
self._transformers = []
else:
self.plugin_registry.update(self.plugins['TRANSFORMER'])
try:
self._operators = self.plugins['OPERATOR'].keys()
except:
self._operators = []
else:
self.plugin_registry.update(self.plugins['OPERATOR'])
try:
self._extractors = self.plugins['EXTRACTOR'].keys()
except:
self._extractors = []
else:
self.plugin_registry.update(self.plugins['EXTRACTOR'])
try:
self._input_adapters = self.plugins['ADAPTER'].keys()
except:
self._input_adapters = []
else:
self.plugin_registry.update(self.plugins['ADAPTER'])
try:
self._output_adapters = self.plugins['PUBLISHER'].keys()
except:
            self._output_adapters = []
else:
self.plugin_registry.update(self.plugins['PUBLISHER'])
self._registered_instances = {}
fp = None
try:
fp = open('projects/default.txt', 'r')
jsconfig = fp.read()
config = json.loads(jsconfig)
except:
pass
else:
self.loadConfig(config)
finally:
if fp is not None:
fp.close()
def newInstance(self, className):
try:
className = className.replace(' ', '')
cls = self.plugin_registry[className]
this_instance = cls()
except:
# need to complain about invalid className
log.error('Failed to create component %s - Invalid Classname' % className)
            log.debug('Plugin Registry Dump: %s' % self.plugin_registry)
return None
else:
key = str(hash(this_instance))
self._registered_instances[key] = this_instance
return key
def removeInstance(self, key):
try:
self._registered_instances[key]
except:
log.error('Failed to delete component %s - Instance not found' % key)
log.debug('Registered Instances: %s' % self._registered_instances)
else:
self._registered_instances.pop(key)
def Inputs(self, key):
#return self._registered_instances[key].Inputs
# needs error handling here...
return self._registered_instances[key].get_in_ports()
def Outputs(self, key):
#return self._registered_instances[key].Outputs
return self._registered_instances[key].get_out_ports()
def Instance(self, key):
return self._registered_instances[key]
def update(self, jsconfig):
statusText = 'Unidentified Error Saving Project'
# close any existing workflow
if self.Workflow is not None:
try:
self.Workflow.close()
except:
pass
# set the new updated config from the UI
try:
self._config = json.loads(jsconfig)
except:
            statusText = 'Unable To Parse Project Configuration'
else:
# Check for valid input component
(in_status, inputs) = self.config_has_valid_input()
(out_status, outputs) = self.config_has_valid_output()
if in_status is False:
                statusText = 'Unable To Save Configuration<br><br>No Valid Adapter Specified.<br>'
            # Check for valid output component
            elif out_status is False:
                statusText = 'Unable To Save Configuration<br><br>No Valid Publisher Specified.<br>'
else:
# translate the current config into a usable DAG
result = self.translate()
if result is False:
statusText = 'Error Translating Supplied Configuration'
# check the connectivity of the graph
elif not self.is_connected(inputs, outputs):
statusText = 'Unable To Save Project<br><br>Found Broken Path Between Adapter and Publisher.<br>'
else:
# Build the new workflow
try:
try:
# get the core count from the config
cores = int(config['cores'])
# has to be at least 1
if cores < 1:
cores = 1
except:
log.warning('Could not get core count from config.')
traceback.print_exc()
log.warning('Defaulting to core count of 1')
cores = 1
#log.info('Core count: %s' % cores)
self.Workflow = Dataflow(self.Graph, cores)
except:
statusText = 'Error Constructing Workflow'
else:
statusText = 'Project Successfully Saved'
# Save config here...
fp = None
try:
fp = open('projects/default.txt', 'w')
fp.write(jsconfig)
except:
log.error('Unable to save configuration')
else:
log.info('Configuration successfully saved')
finally:
if fp is not None:
fp.close()
return statusText
def is_connected(self, starts, ends):
for start in starts:
for end in ends:
connected = self.find_path(self.Graph, self._registered_instances[start], self._registered_instances[end])
if connected is None:
return False
return True
def find_path(self, graph, start, end, path=[]):
path = path + [start]
if start == end:
return path
        if start not in graph:
return None
for node in graph[start]:
if node not in path:
newpath = self.find_path(graph, node, end, path)
if newpath:
return newpath
return None
def _get_workflow(self):
return self._workflow
def _set_workflow(self, wf):
self._workflow = wf
def _get_config(self):
return self._config
def _set_config(self, config):
self._config = config
def _get_graph(self):
return self._graph
def _set_graph(self, graph):
self._graph = graph
def config_has_valid_input(self):
valid_input = False
valid_inputs = []
for container in self.Config['containers']:
if container['type'] == 'Adapters':
valid_input = True
valid_inputs.append(container['cid'])
return (valid_input, valid_inputs)
def config_has_valid_output(self):
valid_output = False
valid_outputs = []
for container in self.Config['containers']:
if container['type'] == 'Publishers':
valid_output = True
valid_outputs.append(container['cid'])
return (valid_output, valid_outputs)
def translate(self):
status = None
G = {}
for entry in self.Config['wires']:
try:
source_container_id = entry['src']['moduleId']
target_container_id = entry['tgt']['moduleId']
input = entry['tgt']['termid']
output = entry['src']['termid']
source_key = self.Config['containers'][source_container_id]['cid']
target_key = self.Config['containers'][target_container_id]['cid']
source = self._registered_instances[source_key]
target = self._registered_instances[target_key]
except:
status = False
else:
                if source in G:
current_children = G[source]
current_children[target] = (output, input)
G[source] = current_children
else:
G[source] = {target:(output, input)}
status = True
# set the graph
self.Graph = G
return status
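    # Illustrative shape of the graph built above (instances abbreviated):
    #   {source_instance: {target_instance: ('out_port', 'in_port'), ...}, ...}
    # This adjacency mapping is what pypes.pipeline.Dataflow consumes in
    # update() above.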
def send(self, doc):
response = {}
try:
if self.Workflow is not None:
self.Workflow.send(doc)
response['status'] = 'success'
else:
log.error('No workflow defined')
response['status'] = 'failure'
response['error'] = 'No Active Workflow Defined'
except:
response['status'] = 'failure'
response['error'] = 'Unexpected Error Running Project'
return response
def _get_filters(self):
self._filters.sort()
return self._filters
def _get_operators(self):
self._operators.sort()
return self._operators
def _get_extractors(self):
self._extractors.sort()
return self._extractors
def _get_input_adapters(self):
self._input_adapters.sort()
return self._input_adapters
def _get_output_adapters(self):
self._output_adapters.sort()
return self._output_adapters
def _get_transformers(self):
self._transformers.sort()
return self._transformers
def getComponentConfig(self, id):
try:
params = self._registered_instances[id].get_parameters()
except:
log.error('Could not get component with id %s' % id)
params = 'null'
log.debug('Component Parameters: %s' % params)
return params
def setParam(self, id, param, value):
try:
self._registered_instances[id].set_parameter(param, value)
log.debug('Setting Parameters: %s = %s' % (param, value))
except:
            log.error('Unable to set parameter: %s = %s' % (param, value))
# class properties
Config = property(_get_config, _set_config)
Graph = property(_get_graph, _set_graph)
Workflow = property(_get_workflow, _set_workflow)
Filters = property(_get_filters)
Transformers = property(_get_transformers)
Extractors = property(_get_extractors)
InputAdapters = property(_get_input_adapters)
OutputAdapters = property(_get_output_adapters)
Operators = property(_get_operators)
def loadConfig(self, config):
for mod in config['containers']:
this_mod = mod['filterName']
id = self.newInstance(this_mod)
mod['cid'] = id
try:
for key, val in mod['params'].items():
self.setParam(id, key, val[0])
except:
pass
self.update(json.dumps(config))
| apache-2.0 |
nikolas/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/gis/utils/geoip.py | 316 | 14811 | """
This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R)
C API (http://www.maxmind.com/app/c). This is an alternative to the GPL
licensed Python GeoIP interface provided by MaxMind.
GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts.
For IP-based geolocation, this module requires the GeoLite Country and City
datasets, in binary format (CSV will not work!). The datasets may be
downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/.
Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory
corresponding to settings.GEOIP_PATH. See the GeoIP docstring and examples
below for more details.
TODO: Verify compatibility with Windows.
Example:
>>> from django.contrib.gis.utils import GeoIP
>>> g = GeoIP()
>>> g.country('google.com')
{'country_code': 'US', 'country_name': 'United States'}
>>> g.city('72.14.207.99')
{'area_code': 650,
'city': 'Mountain View',
'country_code': 'US',
'country_code3': 'USA',
'country_name': 'United States',
'dma_code': 807,
'latitude': 37.419200897216797,
'longitude': -122.05740356445312,
'postal_code': '94043',
'region': 'CA'}
>>> g.lat_lon('salon.com')
(37.789798736572266, -122.39420318603516)
>>> g.lon_lat('uh.edu')
(-95.415199279785156, 29.77549934387207)
>>> g.geos('24.124.1.80').wkt
'POINT (-95.2087020874023438 39.0392990112304688)'
"""
import os, re
from ctypes import c_char_p, c_float, c_int, Structure, CDLL, POINTER
from ctypes.util import find_library
from django.conf import settings
if not settings.configured: settings.configure()
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = dict((key, getattr(settings, key))
for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
if hasattr(settings, key))
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None)
# GeoIP Exception class.
class GeoIPException(Exception): pass
# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
lib_name = None
else:
# TODO: Is this really the library name for Windows?
lib_name = 'GeoIP'
# Getting the path to the GeoIP library.
if lib_name: lib_path = find_library(lib_name)
if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). '
'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)
# Regular expressions for recognizing IP addresses and the GeoIP
# free database editions.
ipregex = re.compile(r'^(?P<w>\d\d?\d?)\.(?P<x>\d\d?\d?)\.(?P<y>\d\d?\d?)\.(?P<z>\d\d?\d?)$')
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
#### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` to accommodate this in future when
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
# TODO: The following structure fields were added in 1.4.3 --
# uncomment these fields when sure previous versions are no
# longer distributed by package maintainers.
#('charset', c_int),
#('continent_code', c_char_p),
]
class GeoIPTag(Structure): pass
#### ctypes function prototypes ####
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# For retrieving records by name or address.
def record_output(func):
func.restype = RECTYPE
return func
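# ctypes assumes C functions return int unless told otherwise; record_output
# here and string_output below override restype so record lookups come back
# as GeoIPRecord pointers and the country helpers as C strings.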
rec_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
rec_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
geoip_open = lgeoip.GeoIP_open
geoip_open.restype = DBTYPE
geoip_close = lgeoip.GeoIP_delete
geoip_close.argtypes = [DBTYPE]
geoip_close.restype = None
# String output routines.
def string_output(func):
func.restype = c_char_p
return func
geoip_dbinfo = string_output(lgeoip.GeoIP_database_info)
cntry_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
cntry_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
cntry_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
cntry_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
#### GeoIP class ####
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been updated,
# reload filehandle and/or memory cache.
#
# GEOIP_INDEX_CACHE - just cache
# the most frequently accessed index portion of the database, resulting
# in faster lookups than GEOIP_STANDARD, but less memory usage than
# GEOIP_MEMORY_CACHE - useful for larger databases such as
# GeoIP Organization and GeoIP City. Note, for GeoIP Country, Region
# and Netspeed databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
cache_options = dict((opt, None) for opt in (0, 1, 2, 4))
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initializes the GeoIP object, no parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
and GEOIP_INDEX_CACHE `GeoIPOptions` C API settings,
respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = self.cache_options[cache]
else:
raise GeoIPException('Invalid caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, basestring):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = geoip_open(country_db, cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = geoip_open(city_db, cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = geoip_open(path, cache)
info = geoip_dbinfo(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if self._country: geoip_close(self._country)
if self._city: geoip_close(self._city)
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, basestring):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
self._check_query(query, city=True)
if ipregex.match(query):
# If an IP address was passed in
ptr = rec_by_addr(self._city, c_char_p(query))
else:
# If a FQDN was passed in.
ptr = rec_by_name(self._city, c_char_p(query))
# Checking the pointer to the C structure, if valid pull out elements
        # into a dictionary and return.
if bool(ptr):
record = ptr.contents
return dict((tup[0], getattr(record, tup[0])) for tup in record._fields_)
else:
return None
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
self._check_query(query, city_or_country=True)
if self._country:
if ipregex.match(query): return cntry_code_by_addr(self._country, query)
else: return cntry_code_by_name(self._country, query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
self._check_query(query, city_or_country=True)
if self._country:
if ipregex.match(query): return cntry_name_by_addr(self._country, query)
else: return cntry_name_by_name(self._country, query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
        Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code' : self.country_code(query),
'country_name' : self.country_name(query),
}
#### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None: return None
else: return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
#### GeoIP Database Information Routines ####
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = geoip_dbinfo(self._country)
return ci
country_info = property(country_info)
def city_info(self):
"Retuns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = geoip_dbinfo(self._city)
return ci
city_info = property(city_info)
def info(self):
"Returns information about all GeoIP databases in use."
return 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
info = property(info)
#### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
| gpl-3.0 |
LiaoPan/scikit-learn | sklearn/datasets/olivetti_faces.py | 197 | 4688 | """Modified Olivetti faces dataset.
The original database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
There are ten different images of each of 40 distinct subjects. For some
subjects, the images were taken at different times, varying the lighting,
facial expressions (open / closed eyes, smiling / not smiling) and facial
details (glasses / no glasses). All the images were taken against a dark
homogeneous background with the subjects in an upright, frontal position (with
tolerance for some side movement).
The original dataset consisted of 92 x 112, while the Roweis version
consists of 64x64 images.
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
try:
# Python 2
import urllib2
urlopen = urllib2.urlopen
except ImportError:
# Python 3
import urllib.request
urlopen = urllib.request.urlopen
import numpy as np
from scipy.io.matlab import loadmat
from .base import get_data_home, Bunch
from ..utils import check_random_state
from ..externals import joblib
DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat"
TARGET_FILENAME = "olivetti.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
download_if_missing=True):
"""Loader for the Olivetti faces data-set from AT&T.
Read more in the :ref:`User Guide <olivetti_faces>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : boolean, optional
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : optional, integer or RandomState object
The seed or the random number generator used to shuffle the
data.
Returns
-------
An object with the following attributes:
data : numpy array of shape (400, 4096)
Each row corresponds to a ravelled face image of original size 64 x 64 pixels.
images : numpy array of shape (400, 64, 64)
Each row is a face image corresponding to one of the 40 subjects of the dataset.
target : numpy array of shape (400, )
Labels associated to each face image. Those labels are ranging from
0-39 and correspond to the Subject IDs.
DESCR : string
Description of the modified Olivetti Faces Dataset.
Notes
------
This dataset consists of 10 pictures each of 40 individuals. The original
database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Olivetti faces from %s to %s'
% (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
mfile = loadmat(buf)
faces = mfile['faces'].T.copy()
joblib.dump(faces, join(data_home, TARGET_FILENAME), compress=6)
del mfile
else:
faces = joblib.load(join(data_home, TARGET_FILENAME))
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
return Bunch(data=faces.reshape(len(faces), -1),
images=faces,
target=target,
DESCR=MODULE_DOCS)
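    # Hedged usage sketch (triggers the download on first call):
    #   faces = fetch_olivetti_faces(shuffle=True, random_state=42)
    #   faces.data.shape    -> (400, 4096)
    #   faces.images.shape  -> (400, 64, 64)
    #   faces.target[:10]   -> shuffled subject ids in 0..39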
| bsd-3-clause |
LiaoPan/scikit-learn | examples/cluster/plot_mean_shift.py | 348 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The bandwidth can be estimated automatically with estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 246 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/sklearn/ensemble/partial_dependence.py | 2 | 14655 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD Style.
from itertools import count, izip
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..utils import array2d
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution ** X.shape[1]`` (when each
        feature has at least ``grid_resolution`` distinct values).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(map(lambda x: 0.0 <= x <= 1.0, percentiles)):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
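# Illustration (not in the original): for an X with two features and
# grid_resolution=3, the helper returns the 3 ** 2 = 9 cartesian grid points
# plus the two 1-d axes (one per feature) used to build them.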
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier().fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs)
(array([[-10.72892297, 10.72892297]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = array2d(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in xrange(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
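# Note: pdp has shape (n_trees_per_stage, n_points); for regression and
# binary classification that is a single row, so pdp[0] is the curve to plot
# against axes[0].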
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
    The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in gbrt.classes_ .
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in gbrt.classes_' % str(label))
else:
# regression and binary classification
label_idx = 0
X = array2d(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = map(str, range(gbrt.n_features))
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, basestring):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral, basestring)):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
names.append([feature_names[i] for i in fxs])
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in izip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(map(np.size, axes)).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
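# --- Editorial example (not part of the original module): a hedged sketch
# of the multi-class branch above, where ``label`` must name an entry of
# ``gbrt.classes_``. Assumes matplotlib is available; dataset and estimator
# choices are illustrative only.
def _example_plot_partial_dependence_multiclass():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import GradientBoostingClassifier

    iris = load_iris()
    gbrt = GradientBoostingClassifier(n_estimators=10).fit(iris.data,
                                                           iris.target)
    # Omitting ``label`` for a multi-class model raises ValueError.
    fig, axs = plot_partial_dependence(gbrt, iris.data, [0, 1, (0, 1)],
                                       label=0,
                                       feature_names=iris.feature_names)
    return fig, axs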
| unlicense |
pdamodaran/yellowbrick | yellowbrick/utils/target.py | 1 | 2239 | # yellowbrick.utils.target
# Helper functions related to the target variable.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu Dec 27 20:16:18 2018 -0500
#
# For license information, see LICENSE.txt
#
# ID: target.py [] benjamin@bengfort.com $
"""
Helper functions related to the target variable.
"""
##########################################################################
## Imports and Module Variables
##########################################################################
import numpy as np
from sklearn.utils.multiclass import type_of_target
__all__ = [
'CONTINUOUS', 'DISCRETE', 'UNKNOWN', 'MAX_DISCRETE_CLASSES', 'target_color_type'
]
CONTINUOUS = "continuous"
DISCRETE = "discrete"
UNKNOWN = "unknown"
MAX_DISCRETE_CLASSES = 12
##########################################################################
## Helper Functions
##########################################################################
def target_color_type(y):
"""
Determines the type of color space that will best represent the target
variable y, e.g. either a discrete (categorical) color space or a
continuous color space that requires a colormap. This function can handle
both 1D or column vectors as well as multi-output targets.
Parameters
----------
y : array-like
Must be a valid array-like data structure that can be passed to a
scikit-learn supervised estimator.
Returns
-------
color_type : string
One of:
* 'discrete': `y` is either a binary target or a multiclass target
with <= 12 discrete classes.
* 'continuous': `y` is an array-like of floats that are not all
integers or a multiclass target with > 12 discrete classes.
* 'unknown': `y` is array-like but none of the above. For example
a multilabel-indicator or a 3D array. No exception is raised.
"""
ttype = type_of_target(y)
if ttype.startswith(CONTINUOUS):
return CONTINUOUS
if ttype.startswith("binary"):
return DISCRETE
if ttype.startswith("multiclass"):
if len(np.unique(y)) > MAX_DISCRETE_CLASSES:
return CONTINUOUS
return DISCRETE
return UNKNOWN
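# --- Editorial example (not part of the original module): a quick check of
# the three return values documented above; purely illustrative.
def _example_target_color_type():
    # Binary targets and small multiclass targets use a discrete palette.
    assert target_color_type([0, 1, 0, 1]) == DISCRETE
    # Floats (or multiclass with > MAX_DISCRETE_CLASSES classes) need a
    # continuous colormap.
    assert target_color_type([0.2, 1.7, 3.14]) == CONTINUOUS
    # Anything else, e.g. a multilabel indicator matrix, is unknown.
    assert target_color_type([[0, 1], [1, 1]]) == UNKNOWN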
| apache-2.0 |
parthea/pydatalab | legacy_tests/bigquery/view_tests.py | 2 | 6022 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
import mock
from oauth2client.client import AccessTokenCredentials
import unittest
import datalab.bigquery
import datalab.context
class TestCases(unittest.TestCase):
def test_view_repr_sql(self):
name = 'test:testds.testView0'
view = datalab.bigquery.View(name, TestCases._create_context())
self.assertEqual('[%s]' % name, view._repr_sql_())
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.tables_list')
@mock.patch('datalab.bigquery._api.Api.datasets_get')
def test_view_create(self,
mock_api_datasets_get,
mock_api_tables_list,
mock_api_tables_get,
mock_api_tables_insert):
mock_api_datasets_get.return_value = None
mock_api_tables_list.return_value = []
mock_api_tables_get.return_value = None
mock_api_tables_insert.return_value = TestCases._create_tables_insert_success_result()
name = 'test:testds.testView0'
sql = 'select * from test:testds.testTable0'
view = datalab.bigquery.View(name, TestCases._create_context())
result = view.create(sql)
self.assertTrue(view.exists())
self.assertEqual(name, str(view))
self.assertEqual('[%s]' % name, view._repr_sql_())
self.assertIsNotNone(result, 'Expected a view')
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tabledata_list')
@mock.patch('datalab.bigquery._api.Api.jobs_insert_query')
@mock.patch('datalab.bigquery._api.Api.jobs_query_results')
@mock.patch('datalab.bigquery._api.Api.jobs_get')
@mock.patch('datalab.bigquery._api.Api.tables_get')
def test_view_result(self, mock_api_tables_get, mock_api_jobs_get, mock_api_jobs_query_results,
mock_api_insert_query, mock_api_tabledata_list, mock_api_tables_insert):
mock_api_insert_query.return_value = TestCases._create_insert_done_result()
mock_api_tables_insert.return_value = TestCases._create_tables_insert_success_result()
mock_api_jobs_query_results.return_value = {'jobComplete': True}
mock_api_tables_get.return_value = TestCases._create_tables_get_result()
mock_api_jobs_get.return_value = {'status': {'state': 'DONE'}}
mock_api_tabledata_list.return_value = TestCases._create_single_row_result()
name = 'test:testds.testView0'
sql = 'select * from test:testds.testTable0'
view = datalab.bigquery.View(name, TestCases._create_context())
view.create(sql)
results = view.results()
self.assertEqual(1, results.length)
first_result = results[0]
self.assertEqual('value1', first_result['field1'])
@mock.patch('datalab.bigquery._api.Api.tables_insert')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.bigquery._api.Api.table_update')
@mock.patch('datalab.context.Context.default')
def test_view_update(self, mock_context_default, mock_api_table_update,
mock_api_tables_get, mock_api_tables_insert):
mock_api_tables_insert.return_value = TestCases._create_tables_insert_success_result()
mock_context_default.return_value = TestCases._create_context()
mock_api_table_update.return_value = None
friendly_name = 'casper'
description = 'ghostly logs'
sql = 'select * from [test:testds.testTable0]'
info = {'friendlyName': friendly_name,
'description': description,
'view': {'query': sql}}
mock_api_tables_get.return_value = info
name = 'test:testds.testView0'
view = datalab.bigquery.View(name, TestCases._create_context())
view.create(sql)
self.assertEqual(friendly_name, view.friendly_name)
self.assertEqual(description, view.description)
self.assertEqual(sql, view.query.sql)
new_friendly_name = 'aziraphale'
new_description = 'demon duties'
new_query = 'SELECT 3 AS x'
view.update(new_friendly_name, new_description, new_query)
self.assertEqual(new_friendly_name, view.friendly_name)
self.assertEqual(new_description, view.description)
self.assertEqual(new_query, view.query.sql)
@staticmethod
def _create_tables_insert_success_result():
return {'selfLink': 'http://foo'}
@staticmethod
def _create_insert_done_result():
# pylint: disable=g-continuation-in-parens-misaligned
return {
'jobReference': {
'jobId': 'test_job'
},
'configuration': {
'query': {
'destinationTable': {
'projectId': 'project',
'datasetId': 'dataset',
'tableId': 'table'
}
}
},
'jobComplete': True,
}
@staticmethod
def _create_tables_get_result(num_rows=1, schema=None):
if not schema:
schema = [{'name': 'field1', 'type': 'string'}]
return {
'numRows': num_rows,
'schema': {
'fields': schema
},
}
@staticmethod
def _create_single_row_result():
# pylint: disable=g-continuation-in-parens-misaligned
return {
'totalRows': 1,
'rows': [
{'f': [{'v': 'value1'}]}
]
}
@staticmethod
def _create_context():
project_id = 'test'
creds = AccessTokenCredentials('test_token', 'test_ua')
return datalab.context.Context(project_id, creds)
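if __name__ == '__main__':
    # Editorial addition (an assumption about how the suite is invoked):
    # allow running this file directly with ``python view_tests.py``.
    unittest.main()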
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/metrics/__init__.py | 212 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance
metrics, pairwise metrics, and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 3 | 14885 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics.scorer import SCORERS
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
"""Ridge regression convergence test using score
TODO: for this test to be robust, we should use a dataset instead
of np.random.
"""
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "dense_cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "dense_cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "dense_cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples / 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples / 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
alpha = 1.0
#for solver in ("svd", "sparse_cg", "dense_cholesky", "lsqr"):
for solver in ("dense_cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y, alpha, sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
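# --- Editorial note (not part of the original tests): the identity behind
# the rescaling check above. For the squared loss,
#
#     sum_i w_i * (y_i - x_i'beta)**2 + alpha * ||beta||**2
#       == sum_i (sqrt(w_i)*y_i - (sqrt(w_i)*x_i)'beta)**2
#          + alpha * ||beta||**2,
#
# so ridge on (sqrt(w) * X, sqrt(w) * y) must give the same coefficients as
# fitting with sample_weight=w. A minimal numeric sketch, assuming only the
# module-level imports above:
def _check_sample_weight_rescaling_identity():
    rng = np.random.RandomState(1)
    X, y = rng.randn(6, 3), rng.randn(6)
    w = 1 + rng.rand(6)
    direct = ridge_regression(X, y, 1.0, sample_weight=w,
                              solver="dense_cholesky")
    scaled = ridge_regression(X * np.sqrt(w)[:, np.newaxis],
                              y * np.sqrt(w), 1.0,
                              solver="dense_cholesky")
    assert_array_almost_equal(direct, scaled)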
def test_ridge_shapes():
"""Test shape of coef_ and intercept_
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
"""Test intercept with multiple targets GH issue #708
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
"""Test BayesianRegression ridge classifier
TODO: test also n_samples > n_features
"""
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
"""Tests the ridge object using individual penalties"""
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="dense_cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'dense_cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = SCORERS['mean_squared_error']
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
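# --- Editorial note (not part of the original tests): the "efficient
# leave-one-out" exercised above rests on the hat-matrix identity for
# ridge. With H = X (X'X + alpha*I)^-1 X' and y_hat = H y, the LOO
# residual for sample i is
#
#     e_i = (y_i - y_hat_i) / (1 - H_ii),
#
# so all n errors come from a single fit instead of n refits. A minimal
# numeric sketch of the identity, under the module-level imports above:
def _check_ridge_loo_identity():
    alpha = 1.0
    X, y = X_diabetes[:20], y_diabetes[:20]
    G = np.linalg.inv(X.T.dot(X) + alpha * np.eye(X.shape[1]))
    H = X.dot(G).dot(X.T)
    loo = (y - H.dot(y)) / (1 - np.diag(H))
    # One explicit refit without sample 0 must reproduce loo[0].
    ridge = Ridge(alpha=alpha, fit_intercept=False).fit(X[1:], y[1:])
    assert_almost_equal(loo[0], y[0] - ridge.predict([X[0]])[0], decimal=6)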
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_class_weights():
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_class_weights_cv():
"""
Test class weights for cross validated ridge classifier.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
"""
Test _RidgeCV's store_cv_values attribute.
"""
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/metrics/classification.py | 6 | 68080 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
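# Editorial example (not part of the original module): what the
# normalization above produces for two common inputs; illustrative only.
def _example_check_targets():
    # Mixing a binary y_true with a multiclass y_pred resolves upward.
    y_type, _, _ = _check_targets([0, 1, 1], [0, 2, 1])
    assert y_type == 'multiclass'
    # Multilabel indicator inputs come back as CSR sparse matrices.
    y_type, y_true, _ = _check_targets([[0, 1], [1, 1]], [[1, 1], [0, 1]])
    assert y_type == 'multilabel-indicator' and hasattr(y_true, 'tocsr')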
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
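    # coo_matrix sums duplicate (row, col) entries, so stacking a 1 for
    # every remaining (true, pred) pair yields the per-cell counts directly.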
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
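# Editorial worked example for the formula above (not part of the original
# module). With 8/10 observed agreement and both annotators answering "1"
# 60% of the time, p_o = 0.8, p_e = 0.6*0.6 + 0.4*0.4 = 0.52, and
# kappa = (0.8 - 0.52) / (1 - 0.52) ~= 0.583.
def _example_cohen_kappa():
    y1 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
    y2 = [1, 1, 1, 1, 1, 0, 1, 0, 0, 0]
    assert abs(cohen_kappa_score(y1, y2) - 0.28 / 0.48) < 1e-10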
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
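# Editorial note (not part of the original module): the 0.75 in the
# multilabel docstring example above, computed by hand. For
# y_true = [[0, 1], [1, 1]] and y_pred = [[1, 1], [1, 1]]:
#     sample 0: |{1} & {0, 1}| / |{1} | {0, 1}| = 1 / 2
#     sample 1: |{0, 1} & {0, 1}| / |{0, 1} | {0, 1}| = 2 / 2
# and the average is (0.5 + 1.0) / 2 = 0.75.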
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
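# Editorial note (not part of the original module): for binary labels the
# Pearson correlation computed above reduces to the contingency-table form
#
#     MCC = (TP*TN - FP*FN)
#           / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN)).
def _example_matthews_corrcoef():
    # The docstring example has TP=2, TN=0, FP=1, FN=1, hence
    # (2*0 - 1*1) / sqrt(3 * 3 * 1 * 1) = -1/3.
    assert abs(matthews_corrcoef([+1, +1, +1, -1],
                                 [+1, -1, +1, +1]) + 1.0 / 3.0) < 1e-9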
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score <http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
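# Editorial note (not part of the original module): the beta weighting
# described above is the weighted harmonic mean
#
#     F_beta = (1 + beta**2) * precision * recall
#              / (beta**2 * precision + recall),
#
# which recovers F1 at beta == 1. A small arithmetic check:
def _example_fbeta_formula():
    p, r, beta = 0.5, 0.25, 2.0
    f2 = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
    # beta > 1 pulls the score toward recall: F2 ~= 0.278 < F1 ~= 0.333.
    assert abs(f2 - 0.625 / 2.25) < 1e-12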
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
        [n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
        <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
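# Illustrative sketch (not part of the original module): the F-beta formula
# used above, spelled out for a single hypothetical precision/recall pair.
def _fbeta_formula_demo(precision=0.5, recall=0.8, beta=1.0):
    # F-beta is the beta-weighted harmonic mean of precision and recall;
    # beta > 1 favours recall, beta < 1 favours precision.
    beta2 = beta ** 2
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)
    # _fbeta_formula_demo() -> 0.6153..., the familiar F1 for P=0.5, R=0.8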
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
    loss : float or int
        Return the average Hamming loss between elements of ``y_true`` and
        ``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes individual
    labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
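# Illustrative sketch (not part of the original module): the docstring
# example of log_loss computed by hand with the formula above.
def _log_loss_demo():
    import numpy as np
    # Probabilities assigned to the *true* class of each sample
    # (labels ["spam", "ham", "ham", "spam"], columns ordered [ham, spam]).
    p_true = np.array([0.9, 0.9, 0.8, 0.65])
    return np.mean(-np.log(p_true))     # -> 0.21616...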
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
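# Illustrative sketch (not part of the original module): the binary hinge
# loss spelled out on the decision values from the docstring example.
def _hinge_loss_demo():
    import numpy as np
    y = np.array([-1, 1, 1])
    d = np.array([-2.18, 2.36, 0.09])             # decision_function outputs
    return np.mean(np.maximum(0.0, 1.0 - y * d))  # -> 0.30...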
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| mit |
marinkaz/orange3 | Orange/evaluation/clustering.py | 16 | 5430 | import numpy as np
from sklearn.metrics import silhouette_score, adjusted_mutual_info_score, silhouette_samples
from Orange.data import Table
from Orange.evaluation.testing import Results
from Orange.evaluation.scoring import Score
__all__ = ['ClusteringEvaluation']
class ClusteringResults(Results):
def __init__(self, store_data=True, **kwargs):
super().__init__(store_data=True, **kwargs)
def get_fold(self, fold):
results = ClusteringResults()
results.data = self.data
if self.folds is None:
raise ValueError("This 'Results' instance does not have folds.")
if self.models is not None:
results.models = self.models[fold]
results.row_indices = self.row_indices
results.actual = self.actual
results.predicted = self.predicted[:, fold, :]
results.domain = self.domain
return results
class ClusteringScore(Score):
considers_actual = False
def from_predicted(self, results, score_function):
# Clustering scores from labels
if self.considers_actual:
return np.fromiter(
(score_function(results.actual.flatten(), predicted.flatten())
for predicted in results.predicted),
dtype=np.float64, count=len(results.predicted))
# Clustering scores from data only
else:
return np.fromiter(
(score_function(results.data.X, predicted.flatten())
for predicted in results.predicted),
dtype=np.float64, count=len(results.predicted))
class Silhouette(ClusteringScore):
separate_folds = True
def compute_score(self, results):
return self.from_predicted(results, silhouette_score)
class AdjustedMutualInfoScore(ClusteringScore):
separate_folds = True
considers_actual = True
def compute_score(self, results):
return self.from_predicted(results, adjusted_mutual_info_score)
class ClusteringEvaluation(ClusteringResults):
"""
Clustering evaluation.
If the constructor is given the data and a list of learning algorithms, it
runs clustering and returns an instance of `Results` containing the
predicted clustering labels.
.. attribute:: k
The number of runs.
"""
def __init__(self, data, learners, k=1,
store_models=False):
super().__init__(data=data, nmethods=len(learners), store_data=True,
store_models=store_models, predicted=None)
self.k = k
self.predicted = np.empty((len(learners), self.k, len(data)))
self.folds = range(k)
self.row_indices = np.arange(len(data))
self.actual = data.Y.flatten() if hasattr(data, "Y") else None
if self.store_models:
self.models = []
for k in range(self.k):
if self.store_models:
fold_models = []
self.models.append(fold_models)
for i, learner in enumerate(learners):
model = learner(data)
if self.store_models:
fold_models.append(model)
labels = model(data)
self.predicted[i, k, :] = labels.X.flatten()
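# Illustrative usage sketch (not part of the original module). The KMeans
# import path and its n_clusters parameter are assumptions and may differ
# between Orange versions.
def _clustering_evaluation_demo():
    from Orange.data import Table
    from Orange.clustering import KMeans   # assumed import path
    data = Table("iris")
    results = ClusteringEvaluation(data, learners=[KMeans(n_clusters=3)], k=3)
    # Orange Score classes compute their scores when called on results.
    return Silhouette(results)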
def graph_silhouette(X, y, xlim=None, colors=None, figsize=None, filename=None):
"""
Silhouette plot.
:param filename:
Output file name.
    :param X: Orange.data.Table or numpy.ndarray
        Data table.
    :param y: Orange.data.Table or numpy.ndarray
        Cluster labels (integers).
    :param colors: list, optional (default = None)
        List of colors. If provided, it must equal the number of clusters.
    :param figsize: tuple (float, float)
        Figure size (width, height) in inches.
    :param xlim: tuple (float, float)
Limit x-axis values.
"""
import matplotlib.pyplot as plt
if isinstance(X, Table):
X = X.X
if isinstance(y, Table):
y = y.X
y = y.ravel()
# Detect number of clusters and set colors
N = len(set(y))
    if colors is None:
        colors = ["g" if i % 2 else "b" for i in range(N)]
    elif len(colors) != N:
        import sys
        sys.stderr.write("Number of colors does not match the number of clusters.\n")
        return
# Silhouette coefficients
s = silhouette_samples(X, y)
s = s[np.argsort(y)] # Sort by clusters
parts = []
    # Within clusters sort by silhouette scores; cumulative offsets ensure
    # each cluster's slice starts where the previous one ended.
    boundaries = np.cumsum([0] + [np.sum(y == c) for c in range(N)])
    for label in range(N):
        scores = sorted(s[boundaries[label]:boundaries[label + 1]])
        parts.append((scores, label))
# Plot data
if figsize:
plt.figure(figsize=figsize)
else:
plt.figure()
plt.title("Silhouette score")
total = 0
centers = []
for i, (scores, label) in enumerate(parts):
plt.barh(range(total, total + len(scores)),
scores, color=colors[i], edgecolor=colors[i])
centers.append(total+len(scores)/2)
total += len(scores)
    if xlim is not None:
plt.xlim(xlim)
plt.yticks(centers)
plt.gca().set_yticklabels(range(N))
plt.ylabel("Cluster label")
if filename:
plt.savefig(filename)
plt.close()
else:
plt.show()
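# Illustrative usage sketch (not part of the original module), driven with
# synthetic scikit-learn data instead of an Orange Table.
def _graph_silhouette_demo():
    from sklearn.datasets import make_blobs
    X, y = make_blobs(n_samples=60, centers=2, random_state=0)
    graph_silhouette(X, y, xlim=(-1, 1), figsize=(6, 4))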
| bsd-2-clause |
rowhit/h2o-2 | py/testdir_kevin/test_parse_specific_case1.py | 9 | 2651 | import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
import codecs, unicodedata
print "create some specific small datasets with exp row/col combinations"
print "This is a known fail for both row and col. Leading unmatched double quote issue"
tryList = [
('''\
"a,b,c,d
"a,b,c,d
"a,b,c,d
"a,b,c,d
"a,b,c,d
a,b,c,d
"a,b,c,d
"a,b,c,d
"a,b,c,d
"a,b,c,d
''',
10, 4, [0,0,0,0], ['Enum', 'Enum', 'Enum', 'Enum']),
]
def write_syn_dataset(csvPathname, dataset):
dsf = codecs.open(csvPathname, encoding='utf-8', mode='w+')
encoded = dataset.decode('utf-8')
print "utf8:" , repr(encoded), type(encoded)
print "str or utf8:" , repr(dataset), type(dataset)
dsf.write(dataset)
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(java_heap_GB=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_specific_case1(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
hex_key = "a.hex"
for (dataset, expNumRows, expNumCols, expNaCnt, expType) in tryList:
csvFilename = 'specific_' + str(expNumRows) + "x" + str(expNumCols) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
write_syn_dataset(csvPathname, dataset)
parseResult = h2i.import_parse(path=csvPathname, schema='put', header=0,
hex_key=hex_key, timeoutSecs=10, doSummary=False)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=60)
print "inspect:", h2o.dump_json(inspect)
numRows = inspect['numRows']
self.assertEqual(numRows, expNumRows, msg='Wrong numRows: %s Expected: %s' % (numRows, expNumRows))
numCols = inspect['numCols']
self.assertEqual(numCols, expNumCols, msg='Wrong numCols: %s Expected: %s' % (numCols, expNumCols))
# this is required for the test setup
assert(len(expNaCnt)>=expNumCols)
assert(len(expType)>=expNumCols)
for k in range(expNumCols):
naCnt = inspect['cols'][k]['naCnt']
self.assertEqual(expNaCnt[k], naCnt, msg='col %s naCnt %d should be %s' % (k, naCnt, expNaCnt[k]))
stype = inspect['cols'][k]['type']
self.assertEqual(expType[k], stype, msg='col %s type %s should be %s' % (k, stype, expType[k]))
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
pdamodaran/yellowbrick | yellowbrick/contrib/statsmodels/base.py | 1 | 2626 | # yellowbrick.contrib.statsmodels.base
# A basic wrapper for statsmodels that emulates a scikit-learn estimator.
#
# Author: Ian Ozsvald
# Created: Wed Jan 10 12:47:00 2018 -0500
#
# ID: base.py [] benjamin@bengfort.com $
"""
A basic wrapper for statsmodels that emulates a scikit-learn estimator.
"""
##########################################################################
## Imports
##########################################################################
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator
##########################################################################
## statsmodels Estimator
##########################################################################
class StatsModelsWrapper(BaseEstimator):
"""
Wrap a statsmodels GLM as a sklearn (fake) BaseEstimator for YellowBrick.
Examples
--------
First import the external libraries and helper utilities:
>>> import statsmodels.api as sm
>>> from functools import partial
Instantiate a partial with the statsmodels API:
>>> glm_gaussian_partial = partial(sm.GLM, family=sm.families.Gaussian())
>>> sm_est = StatsModelsWrapper(glm_gaussian_partial)
Create a Yellowbrick visualizer to visualize prediction error:
>>> visualizer = PredictionError(sm_est)
>>> visualizer.fit(X_train, y_train)
>>> visualizer.score(X_test, y_test)
For statsmodels usage, calling .summary() etc:
>>> gaussian_model = glm_gaussian_partial(y_train, X_train)
Note
----
.. note:: This wrapper is trivial, options and extra things like weights
are not currently handled.
"""
def __init__(self, glm_partial, stated_estimator_type="regressor",
scorer=r2_score):
# YellowBrick checks the attribute to see if it is a
# regressor/clusterer/classifier
self._estimator_type = stated_estimator_type
# assume user passes in a partial which we can instantiate later
self.glm_partial = glm_partial
# needs a default scoring function, regression uses r^2 in sklearn
self.scorer = scorer
def fit(self, X, y):
"""
Pretend to be a sklearn estimator, fit is called on creation
"""
# note that GLM takes endog (y) and then exog (X):
# this is the reverse of sklearn's methods
self.glm_model = self.glm_partial(y, X)
self.glm_results = self.glm_model.fit()
return self
def predict(self, X):
return self.glm_results.predict(X)
def score(self, X, y):
return self.scorer(y, self.predict(X))
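# Illustrative end-to-end sketch (not part of the original module); the
# synthetic data and coefficients below are arbitrary.
def _statsmodels_wrapper_demo():
    import numpy as np
    import statsmodels.api as sm
    from functools import partial
    rng = np.random.RandomState(0)
    X = sm.add_constant(rng.rand(50, 2))        # prepend an intercept column
    y = X.dot(np.array([1.0, 2.0, -1.0])) + rng.randn(50) * 0.1
    est = StatsModelsWrapper(partial(sm.GLM, family=sm.families.Gaussian()))
    est.fit(X, y)                               # fits the GLM via statsmodels
    return est.score(X, y)                      # r2_score by default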
| apache-2.0 |
Tong-Chen/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 9 | 1629 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
X_test = [[1], [3], [4]]
assert(np.abs(clf.predict(X_test) - [1, 3, 4]).sum() < 1.e-2) # identity
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
test = [[1], [3], [4]]
assert(np.abs(clf.predict(test) - [1, 3, 4]).sum() < 1.e-3) # identity
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | examples/decomposition/plot_incremental_pca.py | 243 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
fyffyt/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 220 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.fit_transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
djgagne/scikit-learn | examples/decomposition/plot_incremental_pca.py | 243 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
sambitgaan/nupic | examples/opf/experiments/spatial_classification/category_1/description.py | 32 | 1554 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/category_1.csv'),
'errorMetric': 'avg_err',
'modelParams': {
'sensorParams': { 'verbosity': 0},
'clParams': {
'clVerbosity': 0,
},
}
}
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
xupei0610/ComputerGraphics-HW | final/lib/assimp/port/PyAssimp/pyassimp/core.py | 4 | 17722 | """
PyAssimp
This is the main module of PyAssimp.
"""
import sys
if sys.version_info < (2,6):
    raise ImportError('pyassimp: need python 2.6 or newer')
# xrange was renamed range in Python 3 and the original range from Python 2 was removed.
# To keep compatibility with both Python 2 and 3, xrange is set to range for version 3.0 and up.
if sys.version_info >= (3,0):
xrange = range
import ctypes
import os
try: import numpy
except ImportError: numpy = None
import logging
logger = logging.getLogger("pyassimp")
# attach default null handler to logger so it doesn't complain
# even if you don't attach another handler to logger
logger.addHandler(logging.NullHandler())
from . import structs
from . import helper
from . import postprocess
from .errors import AssimpError
from .formats import available_formats
class AssimpLib(object):
"""
Assimp-Singleton
"""
load, load_mem, export, release, dll = helper.search_library()
_assimp_lib = AssimpLib()
def make_tuple(ai_obj, type = None):
res = None
#notes:
# ai_obj._fields_ = [ ("attr", c_type), ... ]
# getattr(ai_obj, e[0]).__class__ == float
if isinstance(ai_obj, structs.Matrix4x4):
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((4,4))
#import pdb;pdb.set_trace()
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
res = [res[i:i+4] for i in xrange(0,16,4)]
elif isinstance(ai_obj, structs.Matrix3x3):
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_]).reshape((3,3))
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
res = [res[i:i+3] for i in xrange(0,9,3)]
else:
if numpy:
res = numpy.array([getattr(ai_obj, e[0]) for e in ai_obj._fields_])
else:
res = [getattr(ai_obj, e[0]) for e in ai_obj._fields_]
return res
# It is faster and more correct to have an init function for each assimp class
def _init_face(aiFace):
aiFace.indices = [aiFace.mIndices[i] for i in range(aiFace.mNumIndices)]
assimp_struct_inits = { structs.Face : _init_face }
def call_init(obj, caller = None):
if helper.hasattr_silent(obj,'contents'): #pointer
_init(obj.contents, obj, caller)
else:
_init(obj,parent=caller)
def _is_init_type(obj):
if helper.hasattr_silent(obj,'contents'): #pointer
return _is_init_type(obj[0])
# null-pointer case that arises when we reach a mesh attribute
# like mBitangents which use mNumVertices rather than mNumBitangents
# so it breaks the 'is iterable' check.
# Basically:
# FIXME!
elif not bool(obj):
return False
tname = obj.__class__.__name__
return not (tname[:2] == 'c_' or tname == 'Structure' \
or tname == 'POINTER') and not isinstance(obj,int)
def _init(self, target = None, parent = None):
"""
Custom initialize() for C structs, adds safely accessible member functionality.
    :param target: set the object which receives the added methods. Useful when manipulating
    pointers, to skip the intermediate 'contents' dereferencing.
"""
if not target:
target = self
dirself = dir(self)
for m in dirself:
if m.startswith("_"):
continue
if m.startswith('mNum'):
if 'm' + m[4:] in dirself:
continue # will be processed later on
else:
name = m[1:].lower()
obj = getattr(self, m)
setattr(target, name, obj)
continue
if m == 'mName':
obj = self.mName
try:
uni = unicode(obj.data, errors='ignore')
except:
uni = str(obj.data, errors='ignore')
target.name = str( uni )
target.__class__.__repr__ = lambda x: str(x.__class__) + "(" + x.name + ")"
target.__class__.__str__ = lambda x: x.name
continue
name = m[1:].lower()
obj = getattr(self, m)
# Create tuples
if isinstance(obj, structs.assimp_structs_as_tuple):
setattr(target, name, make_tuple(obj))
logger.debug(str(self) + ": Added array " + str(getattr(target, name)) + " as self." + name.lower())
continue
if m.startswith('m'):
if name == "parent":
setattr(target, name, parent)
logger.debug("Added a parent as self." + name)
continue
if helper.hasattr_silent(self, 'mNum' + m[1:]):
length = getattr(self, 'mNum' + m[1:])
# -> special case: properties are
# stored as a dict.
if m == 'mProperties':
setattr(target, name, _get_properties(obj, length))
continue
if not length: # empty!
setattr(target, name, [])
logger.debug(str(self) + ": " + name + " is an empty list.")
continue
try:
if obj._type_ in structs.assimp_structs_as_tuple:
if numpy:
setattr(target, name, numpy.array([make_tuple(obj[i]) for i in range(length)], dtype=numpy.float32))
logger.debug(str(self) + ": Added an array of numpy arrays (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [make_tuple(obj[i]) for i in range(length)])
logger.debug(str(self) + ": Added a list of lists (type "+ str(type(obj)) + ") as self." + name)
else:
setattr(target, name, [obj[i] for i in range(length)]) #TODO: maybe not necessary to recreate an array?
logger.debug(str(self) + ": Added list of " + str(obj) + " " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
# initialize array elements
try:
init = assimp_struct_inits[type(obj[0])]
except KeyError:
if _is_init_type(obj[0]):
for e in getattr(target, name):
call_init(e, target)
else:
for e in getattr(target, name):
init(e)
except IndexError:
logger.error("in " + str(self) +" : mismatch between mNum" + name + " and the actual amount of data in m" + name + ". This may be due to version mismatch between libassimp and pyassimp. Quitting now.")
sys.exit(1)
except ValueError as e:
logger.error("In " + str(self) + "->" + name + ": " + str(e) + ". Quitting now.")
if "setting an array element with a sequence" in str(e):
logger.error("Note that pyassimp does not currently "
"support meshes with mixed triangles "
"and quads. Try to load your mesh with"
" a post-processing to triangulate your"
" faces.")
raise e
else: # starts with 'm' but not iterable
setattr(target, name, obj)
logger.debug("Added " + name + " as self." + name + " (type: " + str(type(obj)) + ")")
if _is_init_type(obj):
call_init(obj, target)
if isinstance(self, structs.Mesh):
_finalize_mesh(self, target)
if isinstance(self, structs.Texture):
_finalize_texture(self, target)
return self
def pythonize_assimp(type, obj, scene):
""" This method modify the Assimp data structures
to make them easier to work with in Python.
Supported operations:
- MESH: replace a list of mesh IDs by reference to these meshes
- ADDTRANSFORMATION: add a reference to an object's transformation taken from their associated node.
:param type: the type of modification to operate (cf above)
:param obj: the input object to modify
:param scene: a reference to the whole scene
"""
if type == "MESH":
meshes = []
for i in obj:
meshes.append(scene.meshes[i])
return meshes
if type == "ADDTRANSFORMATION":
def getnode(node, name):
if node.name == name: return node
for child in node.children:
n = getnode(child, name)
if n: return n
node = getnode(scene.rootnode, obj.name)
if not node:
raise AssimpError("Object " + str(obj) + " has no associated node!")
setattr(obj, "transformation", node.transformation)
def recur_pythonize(node, scene):
'''
Recursively call pythonize_assimp on
    the node tree to apply several post-processing steps that
    pythonize the assimp data structures.
'''
node.meshes = pythonize_assimp("MESH", node.meshes, scene)
for mesh in node.meshes:
mesh.material = scene.materials[mesh.materialindex]
for cam in scene.cameras:
pythonize_assimp("ADDTRANSFORMATION", cam, scene)
for c in node.children:
recur_pythonize(c, scene)
def load(filename,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Load a model into a scene. On failure throws AssimpError.
Arguments
---------
filename: Either a filename or a file object to load model from.
If a file object is passed, file_type MUST be specified
Otherwise Assimp has no idea which importer to use.
This is named 'filename' so as to not break legacy code.
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
file_type: string of file extension, such as 'stl'
Returns
---------
Scene object with model data
'''
if hasattr(filename, 'read'):
'''
This is the case where a file object has been passed to load.
It is calling the following function:
const aiScene* aiImportFileFromMemory(const char* pBuffer,
unsigned int pLength,
unsigned int pFlags,
const char* pHint)
'''
        if file_type is None:
raise AssimpError('File type must be specified when passing file objects!')
data = filename.read()
model = _assimp_lib.load_mem(data,
len(data),
processing,
file_type)
else:
# a filename string has been passed
model = _assimp_lib.load(filename.encode("ascii"), processing)
if not model:
raise AssimpError('Could not import file!')
scene = _init(model.contents)
recur_pythonize(scene.rootnode, scene)
return scene
def export(scene,
filename,
file_type = None,
processing = postprocess.aiProcess_Triangulate):
'''
Export a scene. On failure throws AssimpError.
Arguments
---------
scene: scene to export.
filename: Filename that the scene should be exported to.
file_type: string of file exporter to use. For example "collada".
processing: assimp postprocessing parameters. Verbose keywords are imported
from postprocessing, and the parameters can be combined bitwise to
generate the final processing value. Note that the default value will
triangulate quad faces. Example of generating other possible values:
processing = (pyassimp.postprocess.aiProcess_Triangulate |
pyassimp.postprocess.aiProcess_OptimizeMeshes)
'''
from ctypes import pointer
exportStatus = _assimp_lib.export(pointer(scene), file_type.encode("ascii"), filename.encode("ascii"), processing)
if exportStatus != 0:
raise AssimpError('Could not export scene!')
def release(scene):
from ctypes import pointer
_assimp_lib.release(pointer(scene))
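# Hedged usage sketch (not part of the original module): the file names below
# are placeholders, and the helper is never called, so importing this module
# is unaffected.
def _example_roundtrip(in_path='model.obj', out_path='model.dae'):
    ''' Load a model, inspect it, export it as COLLADA, then free it. '''
    scene = load(in_path, processing=postprocess.aiProcess_Triangulate)
    try:
        logger.info("%d meshes in scene", len(scene.meshes))
        export(scene, out_path, file_type='collada')
    finally:
        release(scene)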
def _finalize_texture(tex, target):
setattr(target, "achformathint", tex.achFormatHint)
if numpy:
data = numpy.array([make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)])
else:
data = [make_tuple(getattr(tex, "pcData")[i]) for i in range(tex.mWidth * tex.mHeight)]
setattr(target, "data", data)
def _finalize_mesh(mesh, target):
""" Building of meshes is a bit specific.
We override here the various datasets that can
not be process as regular fields.
For instance, the length of the normals array is
mNumVertices (no mNumNormals is available)
"""
nb_vertices = getattr(mesh, "mNumVertices")
def fill(name):
mAttr = getattr(mesh, name)
if numpy:
if mAttr:
data = numpy.array([make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)], dtype=numpy.float32)
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), numpy.array([], dtype="float32"))
else:
if mAttr:
data = [make_tuple(getattr(mesh, name)[i]) for i in range(nb_vertices)]
setattr(target, name[1:].lower(), data)
else:
setattr(target, name[1:].lower(), [])
def fillarray(name):
mAttr = getattr(mesh, name)
data = []
for index, mSubAttr in enumerate(mAttr):
if mSubAttr:
data.append([make_tuple(getattr(mesh, name)[index][i]) for i in range(nb_vertices)])
if numpy:
setattr(target, name[1:].lower(), numpy.array(data, dtype=numpy.float32))
else:
setattr(target, name[1:].lower(), data)
fill("mNormals")
fill("mTangents")
fill("mBitangents")
fillarray("mColors")
fillarray("mTextureCoords")
# prepare faces
if numpy:
faces = numpy.array([f.indices for f in target.faces], dtype=numpy.int32)
else:
faces = [f.indices for f in target.faces]
setattr(target, 'faces', faces)
class PropertyGetter(dict):
def __getitem__(self, key):
semantic = 0
if isinstance(key, tuple):
key, semantic = key
return dict.__getitem__(self, (key, semantic))
def keys(self):
for k in dict.keys(self):
yield k[0]
def __iter__(self):
return self.keys()
def items(self):
for k, v in dict.items(self):
yield k[0], v
def _get_properties(properties, length):
"""
Convenience Function to get the material properties as a dict
and values in a python format.
"""
result = {}
#read all properties
for p in [properties[i] for i in range(length)]:
#the name
p = p.contents
try:
uni = unicode(p.mKey.data, errors='ignore')
except:
uni = str(p.mKey.data, errors='ignore')
key = (str(uni).split('.')[1], p.mSemantic)
#the data
from ctypes import POINTER, cast, c_int, c_float, sizeof
if p.mType == 1:
arr = cast(p.mData, POINTER(c_float * int(p.mDataLength/sizeof(c_float)) )).contents
value = [x for x in arr]
elif p.mType == 3: #string can't be an array
try:
uni = unicode(cast(p.mData, POINTER(structs.MaterialPropertyString)).contents.data, errors='ignore')
except:
uni = str(cast(p.mData, POINTER(structs.MaterialPropertyString)).contents.data, errors='ignore')
value = uni
elif p.mType == 4:
arr = cast(p.mData, POINTER(c_int * int(p.mDataLength/sizeof(c_int)) )).contents
value = [x for x in arr]
else:
value = p.mData[:p.mDataLength]
if len(value) == 1:
[value] = value
result[key] = value
return PropertyGetter(result)
def decompose_matrix(matrix):
if not isinstance(matrix, structs.Matrix4x4):
raise AssimpError("pyassimp.decompose_matrix failed: Not a Matrix4x4!")
scaling = structs.Vector3D()
rotation = structs.Quaternion()
position = structs.Vector3D()
from ctypes import byref, pointer
_assimp_lib.dll.aiDecomposeMatrix(pointer(matrix), byref(scaling), byref(rotation), byref(position))
return scaling._init(), rotation._init(), position._init()
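# Hedged illustration (illustrative values, shown as comments so module
# behaviour is unchanged): decomposing an identity matrix should yield unit
# scaling, an identity rotation quaternion and a zero translation.
#
#   m = structs.Matrix4x4(1., 0., 0., 0.,
#                         0., 1., 0., 0.,
#                         0., 0., 1., 0.,
#                         0., 0., 0., 1.)
#   scaling, rotation, position = decompose_matrix(m)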
| mit |
sdvillal/manysources | manysources/analyses/losses.py | 1 | 10578 | # coding=utf-8
from __future__ import print_function
from itertools import product
from time import time
import os.path as op
import matplotlib.pyplot as plt
import numpy as np
import h5py
import pandas as pd
from sklearn.metrics import roc_auc_score
from whatami import whatable
from manysources.datasets import MANYSOURCES_DATA_ROOT, ManysourcesDataset
from manysources.experiments import ManysourcesResult, DEFAULT_EXPIDS
@whatable
class CVScore(object):
def __init__(self,
name,
is_loss=True,
per_mol=True,
columns=None):
super(CVScore, self).__init__()
self.name = name
self._is_loss = is_loss
self._per_mol = per_mol
self._columns = columns
def per_mol(self):
return self._per_mol
def is_loss(self):
return self._is_loss
def columns(self):
if self._columns is None:
return self.what().id(),
return self._columns
def _compute_one(self, scores, labels, folds):
raise NotImplementedError()
def compute(self, scores, labels, folds):
if not self.per_mol():
df = pd.DataFrame({'scores': scores,
'labels': labels,
'folds': folds})
            losses = [self._compute_one(fold_df.scores, fold_df.labels, fold_df.folds)
                      for _, fold_df in df.sort_values('folds').groupby('folds')]
            return losses
        else:
            return self._compute_one(scores, labels, folds)
class SquaredError(CVScore):
def __init__(self):
super(SquaredError, self).__init__(name='sqerr',
is_loss=True,
per_mol=True,
columns=('sqerror',))
def _compute_one(self, scores, labels, folds):
return (labels - scores) ** 2
class ROCAUC(CVScore):
def __init__(self):
super(ROCAUC, self).__init__(name='rocauc',
is_loss=False,
per_mol=False,
columns=('rocauc_mean', 'rocauc_std', 'rocauc_stacked'))
def _compute_one(self, scores, labels, folds):
return roc_auc_score(labels, scores) # also tweak "average" and "sample_weight"
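# Small illustration (hedged; toy arrays, not part of the original module):
#
#   >>> import numpy as np
#   >>> SquaredError().compute(np.array([0.9, 0.2]),  # scores
#   ...                        np.array([1.0, 0.0]),  # labels
#   ...                        np.array([0, 0]))      # folds (unused per-mol)
#   array([ 0.01,  0.04])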
def read_losses(dset='bcrp',
feats='ecfps1',
model='logreg3',
#expids=DEFAULT_EXPIDS,
expids=tuple(range(4096)),
calibration=None,
lso=True,
verbose=False,
also_folds=False):
"""
N.B. at the moment, only squared loss.
"""
# the path to the cache file
cache_path = op.join(MANYSOURCES_DATA_ROOT,
'results',
'square_losses.h5')
# the path to the results group
result_coords = '/dset=%s/feats=%s/model=%s/lso=%r/score_calibration=%r' % \
(dset, feats, model, lso, calibration)
#
# each group will have a few datasets:
# - molids: a string list, created once, with the molecule ids
# - expids: a growable dataset with the expid
# - losses: a growable 2 dimensional dataset with a mols per columns and a row per expid (pointing to loss)
# - mfolds: a growable 2 dimensional dataset with a mols per columns and a row per expid (pointing to foldid)
#
# Storage is write-once, read-many, no-delete
#
# Try to read
def read():
with h5py.File(cache_path, 'r') as h5:
group = h5[result_coords]
# do we have all the requested expids?
infile_expids = group['expids'][()] if expids is not None else expids
if 0 == len(set(expids) - set(infile_expids[:, 0])):
e2r = {e: i for e, i in infile_expids if i >= 0}
ok_expids = [expid for expid in expids if expid in e2r]
rows = [e2r[expid] for expid in ok_expids]
losses = group['losses'][rows]
folds = group['folds'][rows]
molids = group['molids'][:]
df_losses = pd.DataFrame(losses, columns=molids, index=ok_expids)
df_folds = None if not also_folds else pd.DataFrame(folds, columns=molids, index=ok_expids)
return df_losses, df_folds
def write():
with h5py.File(cache_path, 'a') as h5:
group = h5.require_group(result_coords)
            # N.B. 'expids' rows are (expid, row_index) pairs; keep the expids only
            infile_expids = set(group['expids'][:, 0]) if 'expids' in group else set()
expidss = []
oks = 0
losses = []
foldss = []
molids = None
for expid in expids:
if verbose:
                    print(expid, lso)
if expid in infile_expids:
if verbose:
                        print('\tAlready done, skipping...')
continue
try:
# look for the results corresponding to the desired expid, lso
res = ManysourcesResult(expid=expid, dset=dset, feats=feats, model=model).lsocv() if lso else \
ManysourcesResult(expid=expid, dset=dset, feats=feats, model=model).crscv()
# Merge the "CV" scores to have one score per compound in the dataset
scores, labels, folds = res.merge_scores(calibration=calibration)
if verbose:
                        print(roc_auc_score(labels, scores, average='samples'))
losses.append((labels - scores) ** 2)
foldss.append(folds)
if molids is None:
molids = res.molids()
expidss.append((expid, len(infile_expids) + oks))
oks += 1
                except Exception:
                    # We guess that this happens when the external set only contains one class, but we need to check
                    print('Warning, had troubles with', expid, lso)
expidss.append((expid, -1))
# write molids - N.B. assume same for all of them, which is reasonable
if 'molids' not in group:
group['molids'] = molids
# write expids index
expids_dset = group.require_dataset('expids',
shape=(len(infile_expids) + len(expidss), 2),
dtype=np.int32,
maxshape=(None, 2))
expids_dset.resize((len(infile_expids) + len(expidss), 2))
expids_dset[len(infile_expids):] = expidss
# write losses
losses_dset = group.require_dataset('losses',
shape=(len(infile_expids) + len(losses), len(molids)),
dtype=np.float64,
maxshape=(None, len(molids)))
losses_dset.resize((len(infile_expids) + len(losses), len(molids)))
losses_dset[len(infile_expids):] = losses
# write folds (should be optional)
folds_dset = group.require_dataset('folds',
shape=(len(infile_expids) + len(losses), len(molids)),
dtype=np.int32,
maxshape=(None, len(molids)))
folds_dset.resize((len(infile_expids) + len(losses), len(molids)))
folds_dset[len(infile_expids):] = foldss
try:
return read()
    except Exception:
write()
return read()
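# Hedged usage sketch (dataset/model names and expids are illustrative and
# assume the square_losses.h5 cache under MANYSOURCES_DATA_ROOT is reachable):
#
#   losses_df, folds_df = read_losses(dset='bcrp', feats='ecfps1',
#                                     model='logreg3', expids=tuple(range(16)),
#                                     lso=True, also_folds=True)
#   # losses_df: one row per expid, one column per molid, holding squared losses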
def collect_all_losses(lso=True):
"""
    Reads across all our interesting results so far, and writes the losses DataFrames into a big h5 file
"""
dsets = ('bcrp', 'hERG', 'mutagenicity', 'pgp-barbara', 'pgp-barbara-verapamil', 'pgp-cruciani')
featss = ('ecfps1',)
models = ('logreg1', 'logreg3')
calibrations = (None, 'all', '0-1') # 'max-cheating', 'other-folds',
for dset, feats, model, calib in product(dsets, featss, models, calibrations):
        print(dset, feats, model, calib)
start = time()
read_losses(dset=dset, feats=feats, model=model, calibration=calib, lso=lso)
        print('took %.2f seconds' % (time() - start))
def nicita_petits_plot():
# Read-in the data (fast atm)
losses, _ = read_losses()
losses['source'] = ManysourcesDataset('bcrp').mols().molids2sources(losses.index)
df_mean_loss = losses.groupby('source').mean()
df_source_sizes = losses.groupby('source').size()
# Readability
sources = list(df_mean_loss.index)
num_sources = len(sources)
# Split between lso and crs columns
df_lso = df_mean_loss[[col for col in df_mean_loss.columns if 'lso=True' in col]]
df_crs = df_mean_loss[[col for col in df_mean_loss.columns if 'lso=False' in col]]
# Sort by LSO mean loss
order = np.argsort(df_lso.mean(axis=1)) # How to do indirect sorting in pandas?
df_crs = df_crs.iloc[order]
df_lso = df_lso.iloc[order]
df_source_sizes = df_source_sizes.iloc[order]
fig, (axl, axr) = plt.subplots(nrows=1, ncols=2, sharey=False) # Could be true
# Plot LSO and CRS in same subplot
axl.errorbar(df_lso.mean(axis=1),
np.arange(num_sources),
xerr=df_lso.std(axis=1),
fmt='ok', ecolor='gray', alpha=0.5,
label='lso mean sqrl')
axl.errorbar(df_crs.mean(axis=1),
np.arange(num_sources),
xerr=df_crs.std(axis=1),
fmt='ob', ecolor='blue', alpha=0.5,
label='crs mean sqrl')
axl.set_xlabel('mean squared loss')
axl.set_ylabel('source')
axl.set_xlim([0, 1])
axl.set_ylim([-1, num_sources + 1])
axl.set_yticks(range(num_sources))
axl.set_yticklabels(df_lso.index)
axl.legend()
# Plot differences of the mean
axr.errorbar(df_crs.mean(axis=1) - df_lso.mean(axis=1),
np.arange(num_sources),
fmt='ok', ecolor='gray', alpha=0.5)
axr.set_xlabel('difference crs - lso')
axr.set_ylabel('molcount')
axr.set_ylim([-1, num_sources + 1])
axr.set_yticks(range(num_sources))
axr.set_yticklabels(map(str, df_source_sizes))
axr.vlines(0, 0, len(df_lso), linewidth=5, alpha=0.2)
axr.set_xlim([-1, 1])
if __name__ == '__main__':
    collect_all_losses(lso=False)
# nicita_petits_plot()
| bsd-3-clause |
xuleiboy1234/autoTitle | tensorflow/tensorflow/contrib/data/python/util/nest.py | 4 | 18364 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It adds support for dictionaries as a level of nesting in nested structures.
2. It removes support for lists as a level of nesting in nested structures.
The motivation for this change is twofold:
1. Many input-processing functions (e.g. `tf.parse_example()`) return
dictionaries, and we would like to support them natively in datasets.
2. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
from tensorflow.python.util.all_util import remove_undocumented
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
    instance: an instance of `tuple`, `dict`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if isinstance(instance, dict):
# This is a dict. Iterate over the keys in sorted order to make
# this deterministic.
return {k: v for k, v in zip(sorted(instance.keys()), args)}
elif (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, _collections.Sequence) and
all(isinstance(f, _six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
def _elements_of(nest):
if isinstance(nest, dict):
# Iterate over dict keys in sorted order to make this deterministic.
return [v for _, v in sorted(nest.items())]
else:
return nest
def _yield_flat_nest(nest):
for n in _elements_of(nest):
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
def is_sequence(seq):
"""Returns a true if `seq` is a Sequence or dict (except strings/lists).
NOTE(mrry): This differs from `tensorflow.python.util.nest.is_sequence()`,
which *does* treat a Python list as a sequence. For ergonomic
reasons, `tf.contrib.data` users would prefer to treat lists as
  implicit `tf.Tensor` objects, and dicts as (nested) sequences.
Args:
seq: an input sequence.
Returns:
    True if the sequence is not a string or list and is a
    collections.Sequence or dict.
"""
return (isinstance(seq, (_collections.Sequence, dict))
and not isinstance(seq, (list, _six.string_types)))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
If `nest` is not a sequence, this returns a single-element list: `[nest]`.
Args:
nest: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the flattened version of the input.
"""
return list(_yield_flat_nest(nest)) if is_sequence(nest) else [nest]
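# Example (hedged; shown as a comment so import behaviour is unchanged).
# Tuples are recursed into, dict values are visited in sorted-key order, and
# lists/strings are treated as scalars:
#
#   >>> flatten(((3, 4), 5, {'b': 7, 'a': 6}))
#   [3, 4, 5, 6, 7]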
def _recursive_assert_same_structure(nest1, nest2, check_types):
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested structure. "
"First structure: %s, second structure: %s." % (nest1, nest2))
if is_sequence_nest1:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if check_types and type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
for n1, n2 in zip(_elements_of(nest1), _elements_of(nest2)):
_recursive_assert_same_structure(n1, n2, check_types)
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as
well. If set to `False`, for example a list and a tuple of objects will
look same if they have the same size.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements. First structure: %s, second structure: %s."
% (nest1, nest2))
_recursive_assert_same_structure(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in structure:
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
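# Example (hedged; illustration only), the inverse of `flatten`:
#
#   >>> pack_sequence_as(((1, 2), 3), [4, 5, 6])
#   ((4, 5), 6)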
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
    func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
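# Example (hedged; illustration only). Dicts are supported as a level of
# nesting, and the output mirrors the input structure:
#
#   >>> map_structure(lambda x, y: x + y, {'a': 1, 'b': 2}, {'a': 10, 'b': 20})
#   {'a': 11, 'b': 22}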
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_elements_of(shallow_tree),
_elements_of(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flatten output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
_allowed_symbols = [
"assert_same_structure",
"is_sequence",
"flatten",
"pack_sequence_as",
"map_structure",
"assert_shallow_structure",
"flatten_up_to",
"map_structure_up_to",
]
remove_undocumented(__name__, _allowed_symbols)
| mit |
hendrycks/robustness | ImageNet-P/test.py | 1 | 12657 | import numpy as np
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as trn
import torchvision.transforms.functional as trn_F
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
from resnext_50_32x4d import resnext_50_32x4d
from resnext_101_32x4d import resnext_101_32x4d
from resnext_101_64x4d import resnext_101_64x4d
from scipy.stats import rankdata
if __package__ is None:
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.video_loader import VideoFolder
parser = argparse.ArgumentParser(description='Evaluates robustness of various nets on ImageNet',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Architecture
parser.add_argument('--model-name', '-m', default='resnet18', type=str,
choices=['alexnet', 'squeezenet1.1', 'vgg11', 'vgg19', 'vggbn',
'densenet121', 'densenet169', 'densenet201', 'densenet161',
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50', 'resnext101', 'resnext101_64'])
parser.add_argument('--perturbation', '-p', default='brightness', type=str,
choices=['gaussian_noise', 'shot_noise', 'motion_blur', 'zoom_blur',
'spatter', 'brightness', 'translate', 'rotate', 'tilt', 'scale',
'speckle_noise', 'gaussian_blur', 'snow', 'shear'])
parser.add_argument('--difficulty', '-d', type=int, default=1, choices=[1, 2, 3])
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
args = parser.parse_args()
print(args)
# /////////////// Model Setup ///////////////
if args.model_name == 'alexnet':
net = models.AlexNet()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/alexnet'))
args.test_bs = 6
elif args.model_name == 'squeezenet1.0':
net = models.SqueezeNet(version=1.0)
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/squeezenet'))
args.test_bs = 6
elif args.model_name == 'squeezenet1.1':
net = models.SqueezeNet(version=1.1)
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/squeezenet'))
args.test_bs = 6
elif 'vgg' in args.model_name:
if 'bn' not in args.model_name and '11' not in args.model_name:
net = models.vgg19()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/vgg'))
elif '11' in args.model_name:
net = models.vgg11()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/vgg'))
else:
net = models.vgg19_bn()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/vgg'))
args.test_bs = 2
elif args.model_name == 'densenet121':
net = models.densenet121()
import re
    # '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls.
# This pattern is used to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url('https://download.pytorch.org/models/densenet121-a639ec97.pth',
model_dir='/share/data/vision-greg2/pytorch_models/densenet')
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
net.load_state_dict(state_dict)
args.test_bs = 5
elif args.model_name == 'densenet161':
net = models.densenet161()
import re
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict = model_zoo.load_url('https://download.pytorch.org/models/densenet161-8d451a50.pth',
model_dir='/share/data/vision-greg2/pytorch_models/densenet')
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
net.load_state_dict(state_dict)
args.test_bs = 3
elif args.model_name == 'resnet18':
net = models.resnet18()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet18-5c106cde.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/resnet'))
args.test_bs = 5
elif args.model_name == 'resnet34':
net = models.resnet34()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet34-333f7ec4.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/resnet'))
args.test_bs = 4
elif args.model_name == 'resnet50':
net = models.resnet50()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet50-19c8e357.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/resnet'))
args.test_bs = 4
elif args.model_name == 'resnet101':
net = models.resnet101()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/resnet'))
args.test_bs = 3
elif args.model_name == 'resnet152':
net = models.resnet152()
net.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/resnet152-b121ed2d.pth',
# model_dir='/share/data/lang/users/dan/.torch/models'))
model_dir='/share/data/vision-greg2/pytorch_models/resnet'))
args.test_bs = 3
elif args.model_name == 'resnext50':
net = resnext_50_32x4d
# net.load_state_dict(torch.load('/share/data/lang/users/dan/.torch/models/resnext_50_32x4d.pth'))
net.load_state_dict(torch.load('/share/data/vision-greg2/pytorch_models/resnext_50_32x4d.pth'))
args.test_bs = 3
elif args.model_name == 'resnext101':
net = resnext_101_32x4d
# net.load_state_dict(torch.load('/share/data/lang/users/dan/.torch/models/resnext_101_32x4d.pth'))
net.load_state_dict(torch.load('/share/data/vision-greg2/pytorch_models/resnext_101_32x4d.pth'))
args.test_bs = 3
elif args.model_name == 'resnext101_64':
net = resnext_101_64x4d
# net.load_state_dict(torch.load('/share/data/lang/users/dan/.torch/models/resnext_101_64x4d.pth'))
net.load_state_dict(torch.load('/share/data/vision-greg2/pytorch_models/resnext_101_64x4d.pth'))
args.test_bs = 3
args.prefetch = 4
if args.ngpu > 1:
net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))
if args.ngpu > 0:
net.cuda()
torch.manual_seed(1)
np.random.seed(1)
if args.ngpu > 0:
torch.cuda.manual_seed(1)
net.eval()
cudnn.benchmark = True # fire on all cylinders
print('Model Loaded\n')
# /////////////// Data Loader ///////////////
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if args.difficulty > 1 and 'noise' in args.perturbation:
loader = torch.utils.data.DataLoader(
VideoFolder(root="/share/data/vision-greg2/users/dan/datasets/ImageNet-P/" +
args.perturbation + '_' + str(args.difficulty),
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])),
batch_size=args.test_bs, shuffle=False, num_workers=5, pin_memory=True)
else:
loader = torch.utils.data.DataLoader(
VideoFolder(root="/share/data/vision-greg2/users/dan/datasets/ImageNet-P/" + args.perturbation,
transform=trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])),
batch_size=args.test_bs, shuffle=False, num_workers=5, pin_memory=True)
print('Data Loaded\n')
# /////////////// Stability Measurements ///////////////
identity = np.asarray(range(1, 1001))
cum_sum_top5 = np.cumsum(np.asarray([0] + [1] * 5 + [0] * (999 - 5)))
recip = 1./identity
# def top5_dist(sigma):
# result = 0
# for i in range(1,6):
# for j in range(min(sigma[i-1], i) + 1, max(sigma[i-1], i) + 1):
# if 1 <= j - 1 <= 5:
# result += 1
# return result
def dist(sigma, mode='top5'):
if mode == 'top5':
return np.sum(np.abs(cum_sum_top5[:5] - cum_sum_top5[sigma-1][:5]))
elif mode == 'zipf':
return np.sum(np.abs(recip - recip[sigma-1])*recip)
def ranking_dist(ranks, noise_perturbation=True if 'noise' in args.perturbation else False, mode='top5'):
result = 0
step_size = 1 if noise_perturbation else args.difficulty
for vid_ranks in ranks:
result_for_vid = []
for i in range(step_size):
perm1 = vid_ranks[i]
perm1_inv = np.argsort(perm1)
for rank in vid_ranks[i::step_size][1:]:
perm2 = rank
result_for_vid.append(dist(perm2[perm1_inv], mode))
if not noise_perturbation:
perm1 = perm2
perm1_inv = np.argsort(perm1)
result += np.mean(result_for_vid) / len(ranks)
return result
def flip_prob(predictions, noise_perturbation=True if 'noise' in args.perturbation else False):
result = 0
step_size = 1 if noise_perturbation else args.difficulty
for vid_preds in predictions:
result_for_vid = []
for i in range(step_size):
prev_pred = vid_preds[i]
for pred in vid_preds[i::step_size][1:]:
result_for_vid.append(int(prev_pred != pred))
if not noise_perturbation: prev_pred = pred
result += np.mean(result_for_vid) / len(predictions)
return result
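# Toy illustration (hedged): for a single video whose frame-level predictions
# are [3, 3, 5, 5] under a noise perturbation (step_size == 1), consecutive
# frames flip as [0, 1, 0], so the flip probability is 1/3.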
# /////////////// Get Results ///////////////
from tqdm import tqdm
predictions, ranks = [], []
with torch.no_grad():
for data, target in loader:
num_vids = data.size(0)
data = data.view(-1,3,224,224).cuda()
output = net(data)
for vid in output.view(num_vids, -1, 1000):
predictions.append(vid.argmax(1).to('cpu').numpy())
ranks.append([np.uint16(rankdata(-frame, method='ordinal')) for frame in vid.to('cpu').numpy()])
ranks = np.asarray(ranks)
print('Computing Metrics\n')
print('Flipping Prob\t{:.5f}'.format(flip_prob(predictions)))
print('Top5 Distance\t{:.5f}'.format(ranking_dist(ranks, mode='top5')))
print('Zipf Distance\t{:.5f}'.format(ranking_dist(ranks, mode='zipf')))
| apache-2.0 |
Titan-C/scikit-learn | sklearn/linear_model/sag.py | 30 | 12959 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
    The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max squared sum over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
        # Lipschitz constant for the squared loss
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
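# Worked example (hedged; illustrative numbers): with loss='log',
# max_squared_sum=4.0, alpha_scaled=0.01 and fit_intercept=True,
# L = 0.25 * (4.0 + 1) + 0.01 = 1.26, so SAG uses
# step_size = 1. / 1.26 ~= 0.794 (SAGA instead uses 1. / (2 * L + mun)).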
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allows for L1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
cybernet14/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 267 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
voxlol/scikit-learn | sklearn/utils/class_weight.py | 139 | 7206 | # Authors: Andreas Mueller
# Manoj Kumar
# License: BSD 3 clause
import warnings
import numpy as np
from ..externals import six
from ..utils.fixes import in1d
from .fixes import bincount
def compute_class_weight(class_weight, classes, y):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'balanced' or None
If 'balanced', class weights will be given by
``n_samples / (n_classes * np.bincount(y))``.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y : array-like, shape (n_samples,)
        Array of original class labels per sample.
Returns
-------
class_weight_vect : ndarray, shape (n_classes,)
Array with class_weight_vect[i] the weight for i-th class
References
----------
The "balanced" heuristic is inspired by
    Logistic Regression in Rare Events Data, King, Zeng, 2001.
"""
# Import error caused by circular imports.
from ..preprocessing import LabelEncoder
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight in ['auto', 'balanced']:
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.in1d(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
# inversely proportional to the number of samples in the class
if class_weight == 'auto':
recip_freq = 1. / bincount(y_ind)
weight = recip_freq[le.transform(classes)] / np.mean(recip_freq)
warnings.warn("The class_weight='auto' heuristic is deprecated in"
" favor of a new heuristic class_weight='balanced'."
" 'auto' will be removed in 0.18", DeprecationWarning)
else:
recip_freq = len(y) / (len(le.classes_) *
bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
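# Worked example (hedged; toy data):
#
#   >>> import numpy as np
#   >>> compute_class_weight('balanced', np.array([0, 1]),
#   ...                      np.array([0, 0, 0, 1]))
#   array([ 0.66666667,  2.        ])
#   # i.e. n_samples / (n_classes * bincount(y)) = 4 / (2 * [3, 1])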
def compute_sample_weight(class_weight, y, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Array of original class labels per sample.
indices : array-like, shape (n_subsample,), or None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
sample weight will be calculated over the full sample. Only "auto" is
supported for class_weight if this is provided.
Returns
-------
sample_weight_vect : ndarray, shape (n_samples,)
Array with sample weights as applied to the original y
"""
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, six.string_types):
if class_weight not in ['balanced', 'auto']:
raise ValueError('The only valid preset for class_weight is '
'"balanced". Given "%s".' % class_weight)
elif (indices is not None and
not isinstance(class_weight, six.string_types)):
raise ValueError('The only valid class_weight for subsampling is '
'"balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if (not hasattr(class_weight, "__iter__") or
isinstance(class_weight, dict)):
raise ValueError("For multi-output, class_weight should be a "
"list of dicts, or a valid string.")
if len(class_weight) != n_outputs:
raise ValueError("For multi-output, number of elements in "
"class_weight should match number of outputs.")
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight in ['balanced', 'auto'] or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y[indices, k]
classes_subsample = np.unique(y_subsample)
weight_k = np.choose(np.searchsorted(classes_subsample,
classes_full),
compute_class_weight(class_weight_k,
classes_subsample,
y_subsample),
mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(class_weight_k,
classes_full,
y_full)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[np.in1d(y_full, list(classes_missing))] = 0.
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight,
axis=0,
dtype=np.float64)
return expanded_class_weight
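# A minimal sketch (hypothetical values) of the per-sample expansion:
#
#     y = np.array([0, 0, 0, 1])
#     # 'balanced' class weights are [0.667, 2.0] (see above), and each
#     # sample receives the weight of its own class:
#     compute_sample_weight('balanced', y)
#     # -> approximately [0.667, 0.667, 0.667, 2.0]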
| bsd-3-clause |
beardeer/prediction-wrapper | metric_wrappers.py | 1 | 2296 | """Useful performance metrics
"""
# Author: Xiaolu Xiong <beardeer@gmail.com>
from abc import ABCMeta, abstractmethod
from scipy import stats
from sklearn import metrics
class MetricWrapper(object):
"""The abstract class to build performance metrics
"""
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def measure(cls, label, prediction):
"""The abstract class method to measure the performance of predictions
Parameters
----------
label : array
Label data
prediction : array
Prediction data
Raises
------
NotImplementedError
"""
raise NotImplementedError()
class RSquare(MetricWrapper):
"""The implementation of simple coefficient of determination.
It is the square of pearson correlation.
"""
@classmethod
def measure(cls, label, prediction):
"""Measure the simple coefficient of determination.
Parameters
----------
label : array
Label data
prediction : array
Prediction data
Returns
-------
float
The square of the Pearson correlation between label and prediction
"""
pearson, _ = stats.pearsonr(label, prediction)
return pearson**2
class AUC(MetricWrapper):
"""The implementation of Area under the ROC curve.
"""
@classmethod
def measure(cls, label, prediction):
"""Measure the AUC.
Parameters
----------
label : array
Label data
prediction : array
Prediction data
Returns
-------
float
The AUC value between label and prediction
"""
return metrics.roc_auc_score(label, prediction)
class RMSE(MetricWrapper):
"""The implementation of root-mean-square error.
"""
@classmethod
def measure(cls, label, prediction):
"""Measure the RMSE
Parameters
----------
label : array
Label data
prediction : array
Prediction data
Returns
-------
float
The RMSE value of label and prediction
"""
return metrics.mean_squared_error(label, prediction)**0.5
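# A minimal usage sketch (hypothetical data, not part of the module):
#
#     label = [0, 1, 1, 0]
#     prediction = [0.1, 0.9, 0.8, 0.3]
#     AUC.measure(label, prediction)      # -> 1.0 (both positives ranked
#                                         #    above both negatives)
#     RMSE.measure(label, prediction)     # sqrt of mean squared error
#     RSquare.measure(label, prediction)  # squared Pearson correlation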
| mit |
aelaguiz/pyvotune | samples/util/pickler.py | 1 | 1908 | # -*- coding: utf-8 -*-
import pyvotune
import collections
import pyvotune.sklearn
import random
import copy
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
log = pyvotune.log.logger()
def reproduce(offspring_cs, variator, rng, args):
if isinstance(variator, collections.Iterable):
for op in variator:
offspring_cs = op(random=rng, candidates=offspring_cs, args=args)
return offspring_cs
else:
return [variator(random=rng, candidates=offspring_cs, args=args)]
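# A minimal sketch of calling ``reproduce`` (``parents`` is a hypothetical
# list of candidate genomes; ``variators``, ``rng`` and ``args`` are set up
# as in __main__ below):
#
#     offspring = reproduce(parents, variators, rng, args)
#
# Passing a list applies each variator in order, so crossover output is
# fed directly into the mutation operators.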
if __name__ == '__main__':
pyvotune.set_debug(True)
# Dummy data
n_features = 28 * 28
rng = random.Random()
#################################
# Initialize PyvoTune Generator #
#################################
gen = pyvotune.Generate(
initial_state={
'sparse': False
},
gene_pool=pyvotune.sklearn.get_classifiers(n_features, rng) +
pyvotune.sklearn.get_decomposers(n_features, rng) +
pyvotune.sklearn.get_image_features(n_features, rng) +
pyvotune.sklearn.get_preprocessors(n_features, rng),
max_length=4,
noop_frequency=0.2,
rng=rng)
args = {
'crossover_rate': 0.5,
'mutation_rate': 0.3,
'pyvotune_generator': gen
}
# Use PyvoTun variators
variators = [
pyvotune.variators.random_reset_mutation,
pyvotune.variators.param_reset_mutation,
pyvotune.variators.scramble_mutation,
pyvotune.variators.uniform_crossover,
pyvotune.variators.n_point_crossover
]
genome = gen.generate(max_retries=150)
print genome
p_genome = pickle.dumps(genome)
print p_genome
u_genome = pickle.loads(p_genome)
print pyvotune.util.side_by_side([genome, u_genome], 50)
if genome == u_genome:
print "EQUAL"
else:
print "NOT EQUAL"
| mit |
voxlol/scikit-learn | examples/cluster/plot_segmentation_toy.py | 257 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: with a weighting only
# weakly dependent on the gradient, the segmentation is close to a
# Voronoi partition of the graph.
graph.data = np.exp(-graph.data / graph.data.std())
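# For intuition (illustrative numbers, not from this script's output): an
# edge with zero gradient keeps weight exp(0) = 1, while an edge whose
# gradient equals one standard deviation drops to exp(-1) ~= 0.37, so
# graph cuts preferentially follow strong gradients.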
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/model_selection/tests/test_search.py | 5 | 51772 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator,
# in order to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
assert_raise_message(AssertionError,
"Expected fit parameter(s) ['eggs'] not seen.",
searcher.fit, X, y, spam=np.ones(10))
assert_raise_message(AssertionError,
"Fit parameter spam has length 1; expected 4.",
searcher.fit, X, y, spam=np.ones(1),
eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(GridSearchCV)
def test_random_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
# NOTE: Remove this test in v0.21
# Use of `fit_params` in the class constructor is deprecated,
# but will still work until v0.21.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_warns(DeprecationWarning, grid_search.fit, X, y)
def test_grid_search_fit_params_two_places():
# NOTE: Remove this test in v0.21
# If users try to input fit parameters in both
# the constructor (deprecated use) and the `fit`
# method, we'll ignore the values passed to the constructor.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
# The "spam" array is too short and will raise an
# error in the CheckingClassifier if used.
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(1)})
expected_warning = ('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.')
assert_warns_message(RuntimeWarning, expected_warning,
grid_search.fit, X, y, spam=np.ones(10))
# Verify that `fit` prefers its own kwargs by giving valid
# kwargs in the constructor and invalid in the method call
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_raise_message(AssertionError, "Fit parameter spam has length 1",
grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
# Make sure the predict/transform etc. functions raise a meaningful error message
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters' % fn_name),
getattr(grid_search, fn_name), X)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
# So can FMS ;)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(cv_results, param_keys, score_keys):
# Check if the search `cv_results`'s array are of correct types
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
assert_true(cv_results['rank_test_score'].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove in 0.20
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
param_grid=params)
grid_search.fit(X, y)
grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
param_grid=params)
grid_search_iid.fit(X, y)
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for search, iid in zip((grid_search, grid_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
assert_true(all(np.all(cv_results[k] >= 0) for k in score_keys
if k != 'rank_test_score'))
assert_true(all(np.all(cv_results[k] <= 1) for k in score_keys
if 'time' not in k and
k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = grid_search.cv_results_
n_candidates = len(grid_search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# scipy.stats dists now supports `seed` but we still support scipy 0.12
# which doesn't support the seed. Hence the assertions in the test for
# random_search alone should not depend on randomization.
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=False,
param_distributions=params)
random_search.fit(X, y)
random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=True,
param_distributions=params)
random_search_iid.fit(X, y)
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for search, iid in zip((random_search, random_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
try:
assert_almost_equal(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
except AssertionError:
pass
try:
assert_almost_equal(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
except AssertionError:
pass
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time in windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(np.in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter is the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
# OneTimeSplitter is a non-re-entrant cv where split can be called only
# once. If ``cv.split`` were called once per param setting in
# GridSearchCV.fit, the 2nd and 3rd parameters would not be evaluated, as
# no train/test indices would be generated for the 2nd and subsequent
# cv.split calls.
# This is a check to make sure cv.split is not called once per param
# setting.
np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
_pop_time_keys(gs2.cv_results_))
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
# As the first two param settings (C=0.1) and the next two param
# settings (C=0.2) are the same, the test and train scores must also be
# the same as long as the same train/test indices are generated for all
# the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
| bsd-3-clause |
imaculate/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 377 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
robintw/scikit-image | skimage/segmentation/random_walker_segmentation.py | 19 | 20435 | """
Random walker segmentation algorithm
from *Random walks for image segmentation*, Leo Grady, IEEE Trans
Pattern Anal Mach Intell. 2006 Nov;28(11):1768-83.
Installing pyamg and using the 'cg_mg' mode of random_walker
significantly improves performance.
"""
import warnings
import numpy as np
from scipy import sparse, ndimage as ndi
# executive summary for next code block: try to import umfpack from
# scipy, but make sure not to raise a fuss if it fails since it's only
# needed to speed up a few cases.
# See discussions at:
# https://groups.google.com/d/msg/scikit-image/FrM5IGP6wh4/1hp-FtVZmfcJ
# http://stackoverflow.com/questions/13977970/ignore-exceptions-printed-to-stderr-in-del/13977992?noredirect=1#comment28386412_13977992
try:
from scipy.sparse.linalg.dsolve import umfpack
old_del = umfpack.UmfpackContext.__del__
def new_del(self):
try:
old_del(self)
except AttributeError:
pass
umfpack.UmfpackContext.__del__ = new_del
UmfpackContext = umfpack.UmfpackContext()
except:
UmfpackContext = None
try:
from pyamg import ruge_stuben_solver
amg_loaded = True
except ImportError:
amg_loaded = False
from scipy.sparse.linalg import cg
from ..util import img_as_float
from ..filters import rank_order
#-----------Laplacian--------------------
def _make_graph_edges_3d(n_x, n_y, n_z):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer
The size of the grid in the z direction.
Returns
-------
edges : (2, N) ndarray
with the total number of edges::
N = n_x * n_y * (n_z - 1) +
n_x * (n_y - 1) * n_z +
(n_x - 1) * n_y * n_z
Graph edges with each column describing a node-id pair.
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
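# A minimal sketch (not part of the module): for a 2x2x1 grid the four
# vertices are numbered 0..3, and the formula above gives
# N = 0 + 2 + 2 = 4 edges:
#
#     _make_graph_edges_3d(2, 2, 1)
#     # -> array([[0, 2, 0, 1],
#     #           [1, 3, 2, 3]])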
def _compute_weights_3d(data, spacing, beta=130, eps=1.e-6,
multichannel=False):
# Weight calculation is main difference in multispectral version
# Original gradient**2 replaced with sum of gradients ** 2
gradients = 0
for channel in range(0, data.shape[-1]):
gradients += _compute_gradients_3d(data[..., channel],
spacing) ** 2
# All channels considered together in this standard deviation
beta /= 10 * data.std()
if multichannel:
# New final term in beta to give == results in trivial case where
# multiple identical spectra are passed.
beta /= np.sqrt(data.shape[-1])
gradients *= beta
weights = np.exp(- gradients)
weights += eps
return weights
def _compute_gradients_3d(data, spacing):
gr_deep = np.abs(data[:, :, :-1] - data[:, :, 1:]).ravel() / spacing[2]
gr_right = np.abs(data[:, :-1] - data[:, 1:]).ravel() / spacing[1]
gr_down = np.abs(data[:-1] - data[1:]).ravel() / spacing[0]
return np.r_[gr_deep, gr_right, gr_down]
def _make_laplacian_sparse(edges, weights):
"""
Sparse implementation
"""
pixel_nb = edges.max() + 1
diag = np.arange(pixel_nb)
i_indices = np.hstack((edges[0], edges[1]))
j_indices = np.hstack((edges[1], edges[0]))
data = np.hstack((-weights, -weights))
lap = sparse.coo_matrix((data, (i_indices, j_indices)),
shape=(pixel_nb, pixel_nb))
connect = - np.ravel(lap.sum(axis=1))
lap = sparse.coo_matrix(
(np.hstack((data, connect)), (np.hstack((i_indices, diag)),
np.hstack((j_indices, diag)))),
shape=(pixel_nb, pixel_nb))
return lap.tocsr()
def _clean_labels_ar(X, labels, copy=False):
X = X.astype(labels.dtype)
if copy:
labels = np.copy(labels)
labels = np.ravel(labels)
labels[labels == 0] = X
return labels
def _buildAB(lap_sparse, labels):
"""
Build the matrix A and rhs B of the linear system to solve.
A and B are two block of the laplacian of the image graph.
"""
labels = labels[labels >= 0]
indices = np.arange(labels.size)
unlabeled_indices = indices[labels == 0]
seeds_indices = indices[labels > 0]
# The following two lines take most of the time in this function
B = lap_sparse[unlabeled_indices][:, seeds_indices]
lap_sparse = lap_sparse[unlabeled_indices][:, unlabeled_indices]
nlabels = labels.max()
rhs = []
for lab in range(1, nlabels + 1):
mask = (labels[seeds_indices] == lab)
fs = sparse.csr_matrix(mask)
fs = fs.transpose()
rhs.append(B * fs)
return lap_sparse, rhs
def _mask_edges_weights(edges, weights, mask):
"""
Remove edges of the graph connected to masked nodes, as well as
corresponding weights of the edges.
"""
mask0 = np.hstack((mask[:, :, :-1].ravel(), mask[:, :-1].ravel(),
mask[:-1].ravel()))
mask1 = np.hstack((mask[:, :, 1:].ravel(), mask[:, 1:].ravel(),
mask[1:].ravel()))
ind_mask = np.logical_and(mask0, mask1)
edges, weights = edges[:, ind_mask], weights[ind_mask]
max_node_index = edges.max()
# Reassign edges labels to 0, 1, ... edges_number - 1
order = np.searchsorted(np.unique(edges.ravel()),
np.arange(max_node_index + 1))
edges = order[edges.astype(np.int64)]
return edges, weights
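# Hedged illustration (added; not in the original source): the
# unique/searchsorted trick above compresses an arbitrary set of surviving
# node ids onto the contiguous range 0..n_nodes-1 while preserving order:
#
#     >>> edges = np.array([[2, 7], [7, 9]])
#     >>> order = np.searchsorted(np.unique(edges.ravel()), np.arange(10))
#     >>> order[edges]
#     array([[0, 1],
#            [1, 2]])
#
# i.e. the surviving nodes 2, 7 and 9 are relabeled 0, 1 and 2.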
def _build_laplacian(data, spacing, mask=None, beta=50,
multichannel=False):
l_x, l_y, l_z = tuple(data.shape[i] for i in range(3))
edges = _make_graph_edges_3d(l_x, l_y, l_z)
weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10,
multichannel=multichannel)
if mask is not None:
edges, weights = _mask_edges_weights(edges, weights, mask)
lap = _make_laplacian_sparse(edges, weights)
del edges, weights
return lap
#----------- Random walker algorithm --------------------------------
def random_walker(data, labels, beta=130, mode='bf', tol=1.e-3, copy=True,
multichannel=False, return_full_prob=False, spacing=None):
"""Random walker algorithm for segmentation from markers.
Random walker algorithm is implemented for gray-level or multichannel
images.
Parameters
----------
data : array_like
Image to be segmented in phases. Gray-level `data` can be two- or
three-dimensional; multichannel data can be three- or four-
dimensional (multichannel=True) with the highest dimension denoting
channels. Data spacing is assumed isotropic unless the `spacing`
keyword argument is used.
labels : array of ints, of same shape as `data` without channels dimension
Array of seed markers labeled with different positive integers
for different phases. Zero-labeled pixels are unlabeled pixels.
Negative labels correspond to inactive pixels that are not taken
into account (they are removed from the graph). If labels are not
consecutive integers, the labels array will be transformed so that
labels are consecutive. In the multichannel case, `labels` should have
the same shape as a single channel of `data`, i.e. without the final
dimension denoting channels.
beta : float
Penalization coefficient for the random walker motion
(the greater `beta`, the more difficult the diffusion).
mode : string, available options {'cg_mg', 'cg', 'bf'}
Mode for solving the linear system in the random walker algorithm.
If no preference given, automatically attempt to use the fastest
option available ('cg_mg' from pyamg >> 'cg' with UMFPACK > 'bf').
- 'bf' (brute force): an LU factorization of the Laplacian is
computed. This is fast for small images (<1024x1024), but very slow
and memory-intensive for large images (e.g., 3-D volumes).
- 'cg' (conjugate gradient): the linear system is solved iteratively
using the Conjugate Gradient method from scipy.sparse.linalg. This is
less memory-consuming than the brute force method for large images,
but it is quite slow.
- 'cg_mg' (conjugate gradient with multigrid preconditioner): a
preconditioner is computed using a multigrid solver, then the
solution is computed with the Conjugate Gradient method. This mode
requires that the pyamg module (http://pyamg.org/) is
installed. For images of size > 512x512, this is the recommended
(fastest) mode.
tol : float
        tolerance to achieve when solving the linear system, in
        'cg' and 'cg_mg' modes.
copy : bool
If copy is False, the `labels` array will be overwritten with
the result of the segmentation. Use copy=False if you want to
save on memory.
multichannel : bool, default False
If True, input data is parsed as multichannel data (see 'data' above
for proper input format in this case)
return_full_prob : bool, default False
If True, the probability that a pixel belongs to each of the labels
will be returned, instead of only the most likely label.
spacing : iterable of floats
Spacing between voxels in each spatial dimension. If `None`, then
the spacing between pixels/voxels in each dimension is assumed 1.
Returns
-------
output : ndarray
* If `return_full_prob` is False, array of ints of same shape as
`data`, in which each pixel has been labeled according to the marker
that reached the pixel first by anisotropic diffusion.
* If `return_full_prob` is True, array of floats of shape
`(nlabels, data.shape)`. `output[label_nb, i, j]` is the probability
that label `label_nb` reaches the pixel `(i, j)` first.
See also
--------
skimage.morphology.watershed: watershed segmentation
A segmentation algorithm based on mathematical morphology
and "flooding" of regions from markers.
Notes
-----
Multichannel inputs are scaled with all channel data combined. Ensure all
channels are separately normalized prior to running this algorithm.
The `spacing` argument is specifically for anisotropic datasets, where
data points are spaced differently in one or more spatial dimensions.
Anisotropic data is commonly encountered in medical imaging.
The algorithm was first proposed in *Random walks for image
segmentation*, Leo Grady, IEEE Trans Pattern Anal Mach Intell.
2006 Nov;28(11):1768-83.
The algorithm solves the diffusion equation at infinite times for
sources placed on markers of each phase in turn. A pixel is labeled with
the phase that has the greatest probability to diffuse first to the pixel.
The diffusion equation is solved by minimizing x.T L x for each phase,
where L is the Laplacian of the weighted graph of the image, and x is
the probability that a marker of the given phase arrives first at a pixel
by diffusion (x=1 on markers of the phase, x=0 on the other markers, and
the other coefficients are looked for). Each pixel is attributed the label
for which it has a maximal value of x. The Laplacian L of the image
is defined as:
- L_ii = d_i, the number of neighbors of pixel i (the degree of i)
- L_ij = -w_ij if i and j are adjacent pixels
The weight w_ij is a decreasing function of the norm of the local gradient.
This ensures that diffusion is easier between pixels of similar values.
When the Laplacian is decomposed into blocks of marked and unmarked
pixels::
L = M B.T
B A
with first indices corresponding to marked pixels, and then to unmarked
pixels, minimizing x.T L x for one phase amount to solving::
A x = - B x_m
where x_m = 1 on markers of the given phase, and 0 on other markers.
This linear system is solved in the algorithm using a direct method for
small images, and an iterative method for larger images.
Examples
--------
>>> np.random.seed(0)
>>> a = np.zeros((10, 10)) + 0.2 * np.random.rand(10, 10)
>>> a[5:8, 5:8] += 1
>>> b = np.zeros_like(a)
>>> b[3, 3] = 1 # Marker for first phase
>>> b[6, 6] = 2 # Marker for second phase
>>> random_walker(a, b)
array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=int32)
"""
# Parse input data
if mode is None:
if amg_loaded:
mode = 'cg_mg'
elif UmfpackContext is not None:
mode = 'cg'
else:
mode = 'bf'
if UmfpackContext is None and mode == 'cg':
warnings.warn('"cg" mode will be used, but it may be slower than '
'"bf" because SciPy was built without UMFPACK. Consider'
' rebuilding SciPy with UMFPACK; this will greatly '
'accelerate the conjugate gradient ("cg") solver. '
'You may also install pyamg and run the random_walker '
'function in "cg_mg" mode (see docstring).')
if (labels != 0).all():
warnings.warn('Random walker only segments unlabeled areas, where '
'labels == 0. No zero valued areas in labels were '
'found. Returning provided labels.')
if return_full_prob:
# Find and iterate over valid labels
unique_labels = np.unique(labels)
unique_labels = unique_labels[unique_labels > 0]
            out_labels = np.empty(labels.shape + (len(unique_labels),),
                                  dtype=bool)
for n, i in enumerate(unique_labels):
out_labels[..., n] = (labels == i)
else:
out_labels = labels
return out_labels
# This algorithm expects 4-D arrays of floats, where the first three
# dimensions are spatial and the final denotes channels. 2-D images have
# a singleton placeholder dimension added for the third spatial dimension,
# and single channel images likewise have a singleton added for channels.
# The following block ensures valid input and coerces it to the correct
# form.
if not multichannel:
if data.ndim < 2 or data.ndim > 3:
raise ValueError('For non-multichannel input, data must be of '
'dimension 2 or 3.')
dims = data.shape # To reshape final labeled result
data = np.atleast_3d(img_as_float(data))[..., np.newaxis]
else:
if data.ndim < 3:
raise ValueError('For multichannel input, data must have 3 or 4 '
'dimensions.')
dims = data[..., 0].shape # To reshape final labeled result
data = img_as_float(data)
if data.ndim == 3: # 2D multispectral, needs singleton in 3rd axis
data = data[:, :, np.newaxis, :]
# Spacing kwarg checks
if spacing is None:
spacing = np.asarray((1.,) * 3)
elif len(spacing) == len(dims):
if len(spacing) == 2: # Need a dummy spacing for singleton 3rd dim
spacing = np.r_[spacing, 1.]
else: # Convert to array
spacing = np.asarray(spacing)
else:
raise ValueError('Input argument `spacing` incorrect, should be an '
'iterable with one number per spatial dimension.')
if copy:
labels = np.copy(labels)
label_values = np.unique(labels)
# Reorder label values to have consecutive integers (no gaps)
if np.any(np.diff(label_values) != 1):
mask = labels >= 0
labels[mask] = rank_order(labels[mask])[0].astype(labels.dtype)
labels = labels.astype(np.int32)
# If the array has pruned zones, be sure that no isolated pixels
# exist between pruned zones (they could not be determined)
if np.any(labels < 0):
filled = ndi.binary_propagation(labels > 0, mask=labels >= 0)
labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1
del filled
labels = np.atleast_3d(labels)
if np.any(labels < 0):
lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0,
beta=beta, multichannel=multichannel)
else:
lap_sparse = _build_laplacian(data, spacing, beta=beta,
multichannel=multichannel)
lap_sparse, B = _buildAB(lap_sparse, labels)
# We solve the linear system
# lap_sparse X = B
# where X[i, j] is the probability that a marker of label i arrives
# first at pixel j by anisotropic diffusion.
if mode == 'cg':
X = _solve_cg(lap_sparse, B, tol=tol,
return_full_prob=return_full_prob)
if mode == 'cg_mg':
if not amg_loaded:
            warnings.warn(
                """pyamg (http://pyamg.org/) is needed to use
                this mode, but is not installed. The 'cg' mode will be used
                instead.""")
X = _solve_cg(lap_sparse, B, tol=tol,
return_full_prob=return_full_prob)
else:
X = _solve_cg_mg(lap_sparse, B, tol=tol,
return_full_prob=return_full_prob)
if mode == 'bf':
X = _solve_bf(lap_sparse, B,
return_full_prob=return_full_prob)
# Clean up results
if return_full_prob:
        labels = labels.astype(float)
X = np.array([_clean_labels_ar(Xline, labels, copy=True).reshape(dims)
for Xline in X])
for i in range(1, int(labels.max()) + 1):
mask_i = np.squeeze(labels == i)
X[:, mask_i] = 0
X[i - 1, mask_i] = 1
else:
X = _clean_labels_ar(X + 1, labels).reshape(dims)
return X
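# Hedged worked example (added for illustration; not part of the original
# module). Per the Notes above, with the Laplacian split into the unmarked
# block A and the coupling block B, each phase's probabilities solve
# A x = -B x_m. On a 3-node path graph with unit weights and seeds on both
# end nodes, the middle node comes out at probability 0.5 for each phase:
#
#     >>> import numpy as np
#     >>> L = np.array([[ 1., -1.,  0.],
#     ...               [-1.,  2., -1.],
#     ...               [ 0., -1.,  1.]])
#     >>> A = L[1:2, 1:2]           # unmarked block (middle node only)
#     >>> B = L[1:2, [0, 2]]        # coupling of the middle node to the seeds
#     >>> x_m = np.array([1., 0.])  # indicator of the phase-1 seed
#     >>> np.linalg.solve(A, -B.dot(x_m))
#     array([ 0.5])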
def _solve_bf(lap_sparse, B, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i. An LU decomposition
of lap_sparse is computed first. For each pixel, the label i
corresponding to the maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
solver = sparse.linalg.factorized(lap_sparse.astype(np.double))
X = np.array([solver(np.array((-B[i]).todense()).ravel())
for i in range(len(B))])
if not return_full_prob:
X = np.argmax(X, axis=0)
return X
def _solve_cg(lap_sparse, B, tol, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method. For each pixel, the label i corresponding to the
maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
X = []
for i in range(len(B)):
x0 = cg(lap_sparse, -B[i].todense(), tol=tol)[0]
X.append(x0)
if not return_full_prob:
X = np.array(X)
X = np.argmax(X, axis=0)
return X
def _solve_cg_mg(lap_sparse, B, tol, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method with a multigrid preconditioner (ruge-stuben from
pyamg). For each pixel, the label i corresponding to the maximal
X_i is returned.
"""
X = []
ml = ruge_stuben_solver(lap_sparse)
M = ml.aspreconditioner(cycle='V')
for i in range(len(B)):
x0 = cg(lap_sparse, -B[i].todense(), tol=tol, M=M, maxiter=30)[0]
X.append(x0)
if not return_full_prob:
X = np.array(X)
X = np.argmax(X, axis=0)
return X
| bsd-3-clause |
IraKorshunova/kaggle-seizure-prediction | linear_models/lda.py | 1 | 3910 | import numpy as np
import json
import os
from pandas import DataFrame
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.lda import LDA
import preprocessors.fft as fft
from utils.loader import load_test_data, load_train_data
from utils.config_name_creator import *
from merger import merge_csv_files
from commons import reshape_data
from commons import load_test_labels
def train(subject, data_path, plot=False):
d = load_train_data(data_path, subject)
x, y = d['x'], d['y']
print 'n_preictal', np.sum(y)
    print 'n_interictal', np.sum(y == 0)
n_channels = x.shape[1]
n_fbins = x.shape[2]
x, y = reshape_data(x, y)
data_scaler = StandardScaler()
x = data_scaler.fit_transform(x)
lda = LDA()
lda.fit(x, y)
coef = lda.scalings_ * lda.coef_[:1].T
channels = []
fbins = []
for c in range(n_channels):
fbins.extend(range(n_fbins)) # 0- delta, 1- theta ...
channels.extend([c] * n_fbins)
if plot:
fig = plt.figure()
for i in range(n_channels):
            # matplotlib subplot indices are 1-based, so offset i by one
            if n_channels == 24:
                fig.add_subplot(4, 6, i + 1)
            else:
                fig.add_subplot(4, 4, i + 1)
ax = plt.gca()
ax.set_xlim([0, n_fbins])
ax.set_xticks(np.arange(0.5, n_fbins + 0.5, 1))
ax.set_xticklabels(np.arange(0, n_fbins))
max_y = max(abs(coef)) + 0.01
ax.set_ylim([0, max_y])
ax.set_yticks(np.around(np.arange(0, max_y, max_y / 4.0), decimals=1))
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(15)
plt.bar(range(0, n_fbins), abs(coef[i * n_fbins:i * n_fbins + n_fbins]))
fig.suptitle(subject, fontsize=20)
plt.show()
coefs = np.reshape(coef, (n_channels, n_fbins))
return lda, data_scaler, coefs
def predict(subject, model, data_scaler, data_path, submission_path, test_labels, opt_threshold_train=None):
    # opt_threshold_train defaults to None because the call in run_trainer()
    # does not pass it; test_labels and opt_threshold_train are unused below
d = load_test_data(data_path, subject)
x_test, id = d['x'], d['id']
n_test_examples = x_test.shape[0]
n_timesteps = x_test.shape[3]
x_test = reshape_data(x_test)
x_test = data_scaler.transform(x_test)
pred_1m = model.predict_proba(x_test)[:, 1]
pred_10m = np.reshape(pred_1m, (n_test_examples, n_timesteps))
pred_10m = np.mean(pred_10m, axis=1)
ans = zip(id, pred_10m)
df = DataFrame(data=ans, columns=['clip', 'preictal'])
df.to_csv(submission_path + '/' + subject + '.csv', index=False, header=True)
def run_trainer():
with open('SETTINGS.json') as f:
settings_dict = json.load(f)
data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
submission_path = settings_dict['path']['submission_path'] + '/LDA_' + create_fft_data_name(settings_dict)
print data_path
if not os.path.exists(data_path):
fft.run_fft_preprocessor()
if not os.path.exists(submission_path):
os.makedirs(submission_path)
test_labels_path = '/mnt/sda4/CODING/python/kaggle_data/test_labels.csv'
test_labels = load_test_labels(test_labels_path)
subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
coef_list = []
for subject in subjects:
print '***********************', subject, '***************************'
model, data_scaler, coefs = train(subject, data_path)
predict(subject, model, data_scaler, data_path, submission_path, test_labels[subject]['preictal'])
coef_list.append(coefs)
merge_csv_files(submission_path, subjects, 'submission')
merge_csv_files(submission_path, subjects, 'submission_softmax')
merge_csv_files(submission_path, subjects, 'submission_minmax')
merge_csv_files(submission_path, subjects, 'submission_median')
if __name__ == '__main__':
run_trainer() | mit |
voxlol/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 243 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
cybernet14/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 229 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
jniediek/mne-python | tutorials/plot_artifacts_correction_ssp.py | 4 | 3026 | """
.. _tut_artifacts_correct_ssp:
Artifact Correction with SSP
============================
"""
import numpy as np
import mne
from mne.datasets import sample
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
# getting some data ready
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True, add_eeg_ref=False)
raw.set_eeg_reference()
raw.pick_types(meg=True, ecg=True, eog=True, stim=True)
##############################################################################
# Compute SSP projections
# -----------------------
projs, events = compute_proj_ecg(raw, n_grad=1, n_mag=1, average=True)
print(projs)
ecg_projs = projs[-2:]
mne.viz.plot_projs_topomap(ecg_projs)
# Now for EOG
projs, events = compute_proj_eog(raw, n_grad=1, n_mag=1, average=True)
print(projs)
eog_projs = projs[-2:]
mne.viz.plot_projs_topomap(eog_projs)
##############################################################################
# Apply SSP projections
# ---------------------
#
# MNE handles projections at the level of the measurement info, so to
# register them, populate the list found in the 'proj' field
raw.info['projs'] += eog_projs + ecg_projs
#############################################################################
# That was it. Now MNE will apply the projs on demand at any later stage,
# so watch out for ``proj`` parameters in functions, or apply them explicitly
# with the ``.apply_proj`` method.
#############################################################################
# Demonstrate SSP cleaning on some evoked data
# --------------------------------------------
events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# this can be highly data dependent
event_id = {'auditory/left': 1}
epochs_no_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
proj=False, baseline=(None, 0), reject=reject)
epochs_no_proj.average().plot(spatial_colors=True)
epochs_proj = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5, proj=True,
baseline=(None, 0), reject=reject)
epochs_proj.average().plot(spatial_colors=True)
##############################################################################
# Looks cool right? It is however often not clear how many components you
# should take and unfortunately this can have bad consequences as can be seen
# interactively using the delayed SSP mode:
evoked = mne.Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
proj='delayed', baseline=(None, 0),
reject=reject).average()
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.15, 0.01)
evoked.plot_topomap(times, proj='interactive')
##############################################################################
# Now you should see checkboxes. Remove a few SSP projectors and see how the
# auditory pattern suddenly drops off.
| bsd-3-clause |
mikebenfield/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 267 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/calibration/plot_compare_calibration.py | 81 | 5012 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 369 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
indraforyou/keras_tfrecord | mnist_tfrecord.py | 2 | 3634 | import tensorflow as tf
from keras.datasets import mnist
from keras import backend as K
from keras.models import Model
from keras.layers import Dense, Dropout, Flatten, Input, Convolution2D
from keras.callbacks import EarlyStopping
from keras.objectives import categorical_crossentropy
from keras.utils import np_utils
import keras_tfrecord as ktfr
import time
import numpy as np
sess = tf.Session()
K.set_session(sess)
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train[..., np.newaxis]
X_test = X_test[..., np.newaxis]
def arch(inp):
con1 = Convolution2D(32, 3, 3, border_mode='valid', activation = 'relu', subsample=(2,2))
con2 = Convolution2D(32, 3, 3, activation = 'relu', subsample=(2,2))
fla1 = Flatten()
den1 = Dense(128, activation = 'relu')
den2 = Dense(nb_classes, activation = 'softmax')
out = den2(den1(fla1(con2(con1(inp)))))
# fla1 = Flatten()
# den1 = Dense(128, activation = 'relu')
# den2 = Dense(128, activation = 'relu')
# den3 = Dense(nb_classes, activation = 'softmax')
# out = den3(den2(den1(fla1(inp))))
return out
ktfr.data_to_tfrecord(images=X_train, labels=y_train, filename='train.mnist.tfrecord')
# ktfr.data_to_tfrecord(images=X_test, labels=y_test, filename='test.mnist.tfrecord')
batch_size=32
nb_classes=10
x_train_, y_train_ = ktfr.read_and_decode('train.mnist.tfrecord', one_hot=True, n_class=nb_classes, is_train=True)
x_train_batch, y_train_batch = K.tf.train.shuffle_batch([x_train_, y_train_],
batch_size=batch_size,
capacity=2000,
min_after_dequeue=1000,
num_threads=32) # set the number of threads here
x_train_inp = Input(tensor=x_train_batch)
train_out = arch(x_train_inp)
train_model = Model(input=x_train_inp, output=train_out)
ktfr.compile_tfrecord(train_model, optimizer='rmsprop', loss='categorical_crossentropy', out_tensor_lst=[y_train_batch], metrics=['accuracy'])
train_model.summary()
ktfr.fit_tfrecord(train_model, X_train.shape[0], batch_size, nb_epoch=3)
train_model.save_weights('saved_wt.h5')
K.clear_session()
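# Descriptive note (added): the training model above consumed queue tensors
# directly, so for evaluation a second model with the same architecture is
# rebuilt on an ordinary placeholder Input, and the trained weights are
# transferred through the saved HDF5 file rather than by sharing layers.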
x_test_inp = Input(batch_shape=(None,)+(X_test.shape[1:]))
test_out = arch(x_test_inp)
test_model = Model(input=x_test_inp, output=test_out)
test_model.load_weights('saved_wt.h5')
test_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
test_model.summary()
loss, acc = test_model.evaluate(X_test, np_utils.to_categorical(y_test), nb_classes)
print '\nTest accuracy: {0}'.format(acc)
exit()
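# NOTE (added): everything below this exit() is unreachable; it sketches an
# alternative, manual TensorFlow training loop over the same queue tensors
# (and references FLAGS, which is never defined in this file).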
loss = tf.reduce_mean(categorical_crossentropy(y_train_batch, train_out))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# sess.run(tf.global_variables_initializer())
sess.run(tf.initialize_all_variables())
with sess.as_default():
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
while not coord.should_stop():
start_time = time.time()
_, loss_value = sess.run([train_op, loss], feed_dict={K.learning_phase(): 0})
duration = time.time() - start_time
if step % 100 == 0:
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
step += 1
except tf.errors.OutOfRangeError:
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
coord.request_stop()
coord.join(threads)
sess.close()
| mit |
ottogroup/dstoolbox | dstoolbox/tests/test_cluster.py | 1 | 3375 | """Tests for cluster.py"""
from functools import partial
from unittest.mock import patch
import numpy as np
import pytest
from sklearn.metrics import adjusted_mutual_info_score
class TestHierarchicalClustering:
@pytest.fixture
def clustering(self):
from dstoolbox.cluster import hierarchical_clustering
return partial(
hierarchical_clustering,
criterion='distance',
method='complete',
metric='cosine',
)
@pytest.fixture
def data(self):
return np.array([
[0, 0, 1.0],
[0, 0, 0.9],
[0, -1.0, 0],
[0, -0.9, 0],
[0, -0.5, 0.6],
[1.0, 0, 0],
])
@pytest.mark.parametrize('max_dist, expected', [
(-0.1, [0, 1, 2, 3, 4, 5]),
(0.2, [0, 0, 1, 1, 2, 3]),
(0.7, [0, 0, 1, 1, 0, 2]),
(1.1, [0, 0, 0, 0, 0, 0]),
])
def test_functional(self, clustering, data, max_dist, expected):
y_pred = clustering(data, max_dist=max_dist)
assert adjusted_mutual_info_score(y_pred, expected) == 1
def test_array_empty(self, clustering):
result = clustering(np.zeros((0, 10)))
assert (result == np.array([])).all()
def test_only_1_sample(self, clustering):
result = clustering(np.zeros((1, 10)))
assert (result == np.array([0])).all()
@pytest.fixture
def patched_clustering_cls_and_mocks(self):
with patch('dstoolbox.cluster.linkage') as lk:
with patch('dstoolbox.cluster.fcluster') as fc:
from dstoolbox.cluster import HierarchicalClustering
lk.return_value = 123
fc.side_effect = ['1st_result', '2nd_result']
yield HierarchicalClustering, lk, fc
def test_linkage_tree_call_default(
self, patched_clustering_cls_and_mocks):
hc_cls, lk, fc = patched_clustering_cls_and_mocks
X = np.zeros((2, 2))
hc_cls().fit(X)
assert (lk.call_args_list[0][0][0] == X).all()
assert lk.call_args_list[0][1] == {'method': 'single',
'metric': 'euclidean'}
assert fc.call_args_list[0][0][0] == 123
assert fc.call_args_list[0][1] == {
't': 0.5, 'criterion': 'inconsistent'}
def test_linkage_tree_call_non_default(
self, patched_clustering_cls_and_mocks):
hc_cls, lk, fc = patched_clustering_cls_and_mocks
X = np.zeros((2, 2))
hc_cls(
max_dist=0.111,
criterion='crit',
method='meth',
metric='metr',
).fit(X)
assert (lk.call_args_list[0][0][0] == X).all()
assert lk.call_args_list[0][1] == {'method': 'meth',
'metric': 'metr'}
assert fc.call_args_list[0][0][0] == 123
assert fc.call_args_list[0][1] == {'t': 0.111, 'criterion': 'crit'}
def test_repeated_fit_predict(self, patched_clustering_cls_and_mocks):
model = patched_clustering_cls_and_mocks[0]()
X = np.random.random((100, 5))
result = model.fit_predict(X)
assert result == '1st_result'
assert model.labels_ == '1st_result'
result = model.fit_predict(X)
assert result == '2nd_result'
assert model.labels_ == '2nd_result'
| apache-2.0 |
ottogroup/dstoolbox | dstoolbox/visualization.py | 1 | 2829 | """Helper functions for creating visualizations.
Note:
* The helper functions contain additional dependencies not covered by
dstoolbox.
* They are not covered by tests and thus should only be used for
convenience but not for production purposes.
"""
import io
from sklearn.utils import murmurhash3_32
from dstoolbox.utils import get_nodes_edges
COLORS = [
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12',
'#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
]
def _get_shape(est):
if hasattr(est, 'steps'):
shape = 'invhouse'
elif hasattr(est, 'transformer_list'):
shape = 'oval'
else:
shape = 'record'
return shape
def _get_label(name, short_name):
if short_name:
label = name.split('__')[-1]
else:
label = name.split('__')
label[-1] = label[-1].upper()
label = '\n'.join(label)
return label
def _get_hex_color(est):
hashed = int(murmurhash3_32(est)) % len(COLORS)
return COLORS[hashed]
def _make_node(name, est, short_name=True):
"""Create a pydotplus.Node based on an sklearn estimator."""
import pydotplus
label = _get_label(name, short_name=short_name)
label_type = repr(type(est)).strip("'<>").rsplit('.')[-1]
label += f'\n({label_type})'
shape = _get_shape(est)
return pydotplus.Node(
name,
label=label,
shape=shape,
color=_get_hex_color(label_type),
style='filled',
)
def make_graph(name, model, short_name=True):
"""Create a pydotplus graph of an (sklearn) Pipeline.
Parameters
----------
name : string
Name of the model
model : sklearn.pipeline.Pipeline
The (sklearn) Pipeline or FeatureUnion.
short_name : bool (default=True)
Whether to label nodes only by the actual name of the step or by
full name (i.e. the name returned by `get_params`).
Returns
-------
graph : pydotplus.graphviz.Dot
The pydotplus Graph
"""
import pydotplus
nodes, edges = get_nodes_edges(name, model)
graph = pydotplus.Dot('Pipeline', graph_type='digraph')
pydot_nodes = {}
for k, v in nodes.items():
node = _make_node(k, v, short_name=short_name)
graph.add_node(node)
pydot_nodes[k] = node
for edge0, edge1 in edges:
graph.add_edge(
pydotplus.Edge(
pydot_nodes[edge0],
pydot_nodes[edge1],
))
return graph
def save_graph_to_file(graph, filename):
"""Save a visualization of a pydotplus Graph to a file."""
ext = filename.rsplit('.', 1)[-1]
with io.open(filename, 'wb') as f:
f.write(graph.create(format=ext))
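# Hedged usage sketch (added; the pipeline below is illustrative only and
# requires pydotplus plus a graphviz installation):
#
#     >>> from sklearn.pipeline import Pipeline
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> model = Pipeline([('scale', StandardScaler()),
#     ...                   ('clf', LogisticRegression())])
#     >>> graph = make_graph('my_model', model)
#     >>> save_graph_to_file(graph, 'my_model.png')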
| apache-2.0 |
maberyick/RPi-EPOC | EpocArmPi/Library/alpha_work.py | 1 | 3111 | from PyQt4.QtGui import QPalette, QColor
from PyQt4.QtCore import Qt, QObject, pyqtSignal, pyqtSlot
from sklearn import preprocessing
import numpy as np
from scipy.signal import butter, lfilter, welch
from os import getcwd as getdir
from sklearn.externals import joblib
from collections import deque
"""Import from within the files"""
from pyeeg import hjorth_mob
import armbot
class LinkingPath(QObject):
patcher_Alpha = pyqtSignal(int,int)
def __init__(self):
QObject.__init__(self)
def patch_Alpha(self,data1,data2):
self.patcher_Alpha.emit(data1,data2)
class alpha_work():
def __init__(self,obj):
self.epoc_samp = 128.0
self.y = 6500
self.dbuff3=deque([1]*8, 8)
self.dir_user = str(getdir()+'/Profile/')
self.clf_alpha = joblib.load(self.dir_user+'/alphaSVM.pkl')
self.scaler_alpha = joblib.load(self.dir_user+'/alphaScaler.pkl')
self.obj = obj
self.linking = LinkingPath()
self.linking.patcher_Alpha.connect(self.alpha)
self.obj = obj
self.highlght = QPalette.Highlight
self.greeny = QColor(Qt.green)
self.bluey = QColor(Qt.blue)
self.redy = QColor(Qt.red)
self.armb = armbot.arm_bot()
def dc2uV(self,val_):
vaf_ = ( val_ - np.average(val_) )*0.51
return vaf_
def normalz(self,val_):
vaf_ = preprocessing.scale(val_)
return vaf_
def alpha_parametrs(self,val_):
temp_val1 = []; temp_val2 = []
fq_, px_ = welch(val_, nperseg=256, nfft=1023, fs = 128,
noverlap=100, scaling='density'
)
fq1_up = 12.0; fq1_dwn = 8.0
fq2_up = 30.0; fq2_dwn = 4.0
for i in range(len(px_)):
if fq_[i]<=fq1_up and fq_[i]>=fq1_dwn:temp_val1.append(px_[i])
elif fq_[i]<=fq2_up and fq_[i]>=fq2_dwn:temp_val2.append(px_[i])
vaf_eng1 = sum(temp_val1)
vaf_eng2 = sum(temp_val2)
vaf_r = vaf_eng1 / vaf_eng2
vaf_hjorth_mob = (hjorth_mob(val_))*10.0
"""Value = ['Alpha Ratio','Hjorth mobility']"""
vaf_tt = np.array([vaf_r,vaf_hjorth_mob])
return vaf_tt
def filtering(self,data):
samprate = 128
cutlow = 2.0
nyq = samprate/2.0
low = cutlow / nyq
b,a = butter(5,low,btype='highpass',analog=0)
data_f = lfilter(b,a,data)
return data_f
@pyqtSlot()
def alpha(self,val,status):
palette = QPalette()
self.dbuff3.append(val)
acumm = int(self.dbuff3.count(0))
self.obj.pB_Alpha.setValue(acumm)
if status:
palette.setColor(self.highlght, self.greeny)
self.obj.pB_Alpha.setPalette(palette)
if acumm >= 8:
palette.setColor(self.highlght, self.redy)
self.obj.pB_Alpha.setPalette(palette)
self.armb.Alp(True)
else:
palette.setColor(self.highlght, self.bluey)
self.obj.pB_Alpha.setPalette(palette)
def AlphaAction(self,Alpha_val):
if Alpha_val == 0:
self.linking.patch_Alpha(Alpha_val,True)
else:
self.linking.patch_Alpha(Alpha_val,False)
def alpha_processing(self,ch_):
ch_ = self.dc2uV(ch_)
ch_ = self.filtering(ch_)
ch_ = self.normalz(ch_)
param_val = self.alpha_parametrs(ch_)
return param_val
def svm_alphaPro(self,val_):
ch_scl = self.scaler_alpha.transform(self.alpha_processing(val_))
curr_val = self.clf_alpha.predict(ch_scl)
self.AlphaAction(curr_val[0])
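# Hedged standalone sketch (added; not used by the class above): the "Alpha
# Ratio" feature computed in alpha_parametrs is alpha-band power (8-12 Hz)
# over the power in the rest of the 4-30 Hz band, from a Welch periodogram:
#
#     def alpha_ratio(signal, fs=128):
#         fq, px = welch(signal, fs=fs, nperseg=256)
#         alpha = px[(fq >= 8.0) & (fq <= 12.0)].sum()
#         rest = px[(fq >= 4.0) & (fq <= 30.0)].sum() - alpha
#         return alpha / rest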
| gpl-3.0 |
YihaoLu/statsmodels | statsmodels/datasets/strikes/data.py | 25 | 1951 | #! /usr/bin/env python
"""U.S. Strike Duration Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
This is a subset of the data used in Kennan (1985). It was originally
published by the Bureau of Labor Statistics.
::
Kennan, J. 1985. "The duration of contract strikes in US manufacturing.
`Journal of Econometrics` 28.1, 5-28.
"""
DESCRSHORT = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production."""
DESCRLONG = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production. The data is a subset of the data originally
used by Kennan. The data here is data for the months of June only to avoid
seasonal issues."""
#suggested notes
NOTE = """::
Number of observations - 62
Number of variables - 2
Variable name definitions::
duration - duration of the strike in days
iprod - unanticipated industrial production
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/strikes.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
miguelcleon/ODM2-Admin | odm2admin/views.py | 1 | 173237 | import sys
from pathlib import Path # if you haven't already done so
file = Path(__file__).resolve()
parent, root = file.parent, file.parents[1]
sys.path.append(str(root))
# Additionally remove the current file's directory from sys.path
try:
sys.path.remove(str(parent))
except ValueError: # Already removed
pass
from io import StringIO
from decimal import *
import math
import json
import time
import sys
import os
import subprocess
import re
# import pandas as pd
# import numpy
# from colour import Color
# from celery import shared_task
# import odm2admin.tasks as tasks
from urllib.parse import urlparse
from datetime import datetime
from datetime import timedelta
from time import mktime
from django import template
from django.contrib import admin
from django.db import connection
from django.db.models import Max
from django.db.models import Min
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.http import StreamingHttpResponse
from django.shortcuts import render
from django.template import loader
from django.contrib.auth.decorators import login_required
# from django.contrib.auth.models import User
from django.core.mail import EmailMessage
# from django.core import mail
from django.core.management.base import CommandError
from django.core import serializers
from django.core.management import settings
from templatesAndSettings.settings import exportdb
from django.template.response import TemplateResponse
from django.core.exceptions import ObjectDoesNotExist
# from hs_restclient_helper import get_oauth_hs
from django.core import management
# from oauth2_provider.views.generic import ProtectedResourceView
from django.http import HttpResponse
from django.forms.models import model_to_dict
from django.utils.crypto import get_random_string
# from django.contrib.gis.geos import GEOSGeometry
# import hs_restclient as hs_r
from hs_restclient import HydroShare, HydroShareAuthOAuth2
# from oauthlib.oauth2 import TokenExpiredError
# from oauthlib.oauth2 import InvalidGrantError, InvalidClientError
import requests
# from templatesAndSettings.settings import CUSTOM_TEMPLATE_PATH
# from templatesAndSettings.settings import DATA_DISCLAIMER as DATA_DISCLAIMER
# from templatesAndSettings.settings import MAP_CONFIG as MAP_CONFIG
# from templatesAndSettings.settings import RECAPTCHA_PRIVATE_KEY
from .models import Actions
from .models import Annotations
from .models import Authorlists
from .models import Citationextensionpropertyvalues
from .models import Citations
from .models import CvQualitycode
from .models import CvAnnotationtype
from .models import Dataloggerfiles
from .models import Datasetcitations
from .models import Datasets
from .models import Datasetsresults
from .models import Featureactions
from .models import Methods
from .models import People
from .models import Processinglevels
from .models import Profileresults
from .models import Profileresultvalues
from .models import ProcessDataloggerfile
from .models import Relatedfeatures
from .models import Results
from .models import Samplingfeatureextensionpropertyvalues
from .models import Samplingfeatureexternalidentifiers
from .models import Samplingfeatures
from .models import Sites
from .models import Specimens
from .models import Timeseriesresultvalues
from .models import Timeseriesresultvaluesext
from .models import Timeseriesresultvaluesextwannotations
from .models import Timeseriesresultvalueannotations
from .models import Units
from .models import Variables
from .models import Timeseriesresults
from .models import Resultextensionpropertyvalues
from .models import Extensionproperties
# from .forms import LoginForm
from django.views.decorators.cache import never_cache
from django.contrib.auth import authenticate, login
register = template.Library()
__author__ = 'leonmi'
@login_required()
def oauth_view(request, *args, **kwargs):
return HttpResponse('Secret contents!', status=200)
def publications(request):
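    """Render the publications page.

    Filters citations by tag, author, and citation category from
    request.POST, and supports CSV ('export_data') and EndNote
    ('export_endnote') exports through exportcitations().
    """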
# if request.user.is_authenticated():
citationList = Citations.objects.all()
authList = Authorlists.objects.all()
peopleList = People.objects.filter(personid__in=authList.values("personid"))
selectedTag = 'CZO Authors'
selectedAuthor = 'All'
if 'filterTags' in request.POST:
if not request.POST['filterTags'] == 'All':
selectedTag = request.POST['filterTags']
if request.POST['filterTags'] == 'CZO Authors':
citationList = Citations.objects.filter(
citationid__in=authList.values("citationid"))
else:
citationList = Citations.objects.filter(publisher__icontains=selectedTag)
else:
selectedTag = 'All'
else:
citationList = Citations.objects.filter(citationid__in=authList.values("citationid"))
if 'selectedAuthor' in request.POST:
if not request.POST['selectedAuthor'] == 'All':
selectedAuthor = int(request.POST['selectedAuthor'])
authored = Authorlists.objects.filter(personid=selectedAuthor)
citationList = citationList.filter(citationid__in=authored.values("citationid"))
filterTags = ['CZO Authors', 'All', 'AGU', 'LCZO Meeting']
citationCategories = Citationextensionpropertyvalues.objects.filter(propertyid=5).distinct(
"propertyvalue") # citation category Extensionproperties
selectedCategory = None
if 'citationCategories' in request.POST:
if not request.POST['citationCategories'] == 'All':
selectedCategory = request.POST['citationCategories']
citationPropValueFilter = Citationextensionpropertyvalues.objects.filter(
propertyvalue__icontains=selectedCategory)
citationList = citationList.filter(
citationid__in=citationPropValueFilter.values("citationid"))
else:
selectedCategory = 'All'
# context = {'prefixpath': CUSTOM_TEMPLATE_PATH}
if 'export_data' in request.POST:
response = exportcitations(request, citationList, True)
return response
if 'export_endnote' in request.POST:
response = exportcitations(request, citationList, False)
return response
return TemplateResponse(request, 'publications.html',
{'citationList': citationList, 'authList': authList,
'filterTags': filterTags,
'citationCategories': citationCategories,
'selectedCategory': selectedCategory,
'selectedTag': selectedTag, 'peopleList': peopleList,
'selectedAuthor': selectedAuthor,
'prefixpath': settings.CUSTOM_TEMPLATE_PATH})
# ======================= SHORTCUTS =========================================
def AddSensor(request):
if request.user.is_authenticated:
context = {'prefixpath': settings.CUSTOM_TEMPLATE_PATH, 'name': request.user,
'authenticated': True, 'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': settings.ADMIN_SHORTCUTS[0]['shortcuts'][1]['title']}
return TemplateResponse(request, 'AddSensor.html', context)
else:
return HttpResponseRedirect('../')
@login_required()
def chartIndex(request):
context = {'prefixpath': settings.CUSTOM_TEMPLATE_PATH, 'name': request.user,
'authenticated': True, 'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'featureaction': settings.SENSOR_DASHBOARD['featureactionids'][0],
'short_title': settings.ADMIN_SHORTCUTS[0]['shortcuts'][5]['title']}
return TemplateResponse(request, 'chartIndex.html', context)
# chartIndex
def AddProfile(request):
if request.user.is_authenticated:
context = {'prefixpath': settings.CUSTOM_TEMPLATE_PATH, 'name': request.user,
'authenticated': True, 'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': settings.ADMIN_SHORTCUTS[0]['shortcuts'][2]['title']}
return TemplateResponse(request, 'AddProfile.html', context)
else:
return HttpResponseRedirect('../')
def RecordAction(request):
if request.user.is_authenticated:
context = {'prefixpath': settings.CUSTOM_TEMPLATE_PATH, 'name': request.user,
'authenticated': True, 'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': settings.ADMIN_SHORTCUTS[0]['shortcuts'][3]['title']}
return TemplateResponse(request, 'RecordAction.html', context)
else:
return HttpResponseRedirect('../')
def ManageCitations(request):
if request.user.is_authenticated:
context = {'prefixpath': settings.CUSTOM_TEMPLATE_PATH, 'name': request.user,
'authenticated': True, 'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': settings.ADMIN_SHORTCUTS[0]['shortcuts'][4]['title']}
return TemplateResponse(request, 'ManageCitations.html', context)
else:
return HttpResponseRedirect('../')
def get_name_of_sampling_feature(selected_result):
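    """Return the sampling feature name linked to a result's feature action."""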
title_feature_action = Featureactions.objects.filter(
featureactionid=selected_result.featureactionid.featureactionid).get()
title_sampling_feature = Samplingfeatures.objects.filter(
samplingfeatureid=title_feature_action.samplingfeatureid.samplingfeatureid).get()
s = str(title_sampling_feature.samplingfeaturename)
return s
def get_name_of_variable(selected_result):
    """Return the variable code for a result."""
    # filter() returns a QuerySet, so .get() is needed before reading the
    # variablecode attribute of the single matching Variables row
    title_variable = Variables.objects.filter(variableid=selected_result.variableid).get()
    name_of_variable = title_variable.variablecode
    return name_of_variable
def get_name_of_units(selected_result):
    """Return the units name for a result."""
    # selected_result is a single Results instance, so follow its units
    # foreign key directly rather than calling .values() on it
    title_units = Units.objects.filter(unitsid=selected_result.unitsid.unitsid).get()
    name_of_units = title_units.unitsname
    return name_of_units
def relatedFeaturesFilter(request, done, selected_resultid, featureaction,
resultType='Time series coverage', ):
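    """Filter the result list by the related feature selected in request.POST.

    Returns a tuple of (selected_relatedfeatid, done, resultList,
    selected_resultid); 'done' flags that a related feature, rather than a
    feature action, drove the filtering.
    """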
# selected_relatedfeatid = 18
if 'SelectedRelatedFeature' in request.POST and 'update_result_list' not in request.POST:
if not request.POST['SelectedRelatedFeature'] == 'All':
done = True
selected_relatedfeatid = int(request.POST['SelectedRelatedFeature'])
relatedFeatureList = Relatedfeatures.objects.filter(
relatedfeatureid=int(selected_relatedfeatid)).distinct(
'relatedfeatureid')
relatedFeatureListLong = Relatedfeatures.objects.filter(relatedfeatureid=int(
selected_relatedfeatid))
# .select_related('samplingfeatureid','relationshiptypecv','relatedfeatureid')
samplingfeatids = relatedFeatureListLong.values_list('samplingfeatureid', flat=True)
if featureaction == 'All':
resultList = Results.objects.filter(
featureactionid__in=Featureactions.objects.filter(
samplingfeatureid__in=samplingfeatids))
# .select_related('variable','feature_action')
else:
resultList = Results.objects.filter(
featureactionid__in=Featureactions.objects.filter(
samplingfeatureid__in=samplingfeatids)).filter(
featureactionid=featureaction)
if 'update_result_on_related_feature' in request.POST:
# raise ValidationError(relatedFeatureList)
selected_relatedfeatid = relatedFeatureList[0].relatedfeatureid.samplingfeatureid
selected_resultid = resultList[0].resultid
else:
selected_relatedfeatid = request.POST['SelectedRelatedFeature']
if featureaction == 'All':
resultList = Results.objects.filter(
result_type=resultType) # remove slice just for testing [:25]
else:
resultList = Results.objects.filter(result_type=resultType).filter(
featureactionid=featureaction)
else:
selected_relatedfeatid = 'All'
if featureaction == 'All':
resultList = Results.objects.filter(
result_type=resultType) # remove slice just for testing [:25]
else:
resultList = Results.objects.filter(result_type=resultType).filter(
featureactionid=featureaction)
return selected_relatedfeatid, done, resultList, selected_resultid
def web_map(request):
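    """Render the web map page.

    Builds the basemap definitions, legend, and dataset/sampling-feature-type
    filter state from request.POST and settings.MAP_CONFIG.
    """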
if request.user.is_authenticated:
authenticated = True
else:
authenticated = False
map_config = settings.MAP_CONFIG
data_disclaimer = settings.DATA_DISCLAIMER
features = Samplingfeatures.objects.all()
datasets = Datasets.objects.all()
externalidentifiers = None
ids = [ds.datasetid for ds in datasets]
sf_type_list = [sf.sampling_feature_type for sf in features]
sf_types = set(sf_type_list)
terms = [sf_type.name for sf_type in sf_types]
ds_selections = request.POST.getlist('datasetselection')
if ds_selections != []:
selected_ds = []
for ds in ds_selections:
selected_ds.append(int(ds))
else:
selected_ds = ids
sftype_selections = request.POST.getlist('sftypeselection')
if sftype_selections != []:
selected_type = []
for sf in sftype_selections:
selected_type.append(sf)
else:
selected_type = terms
    legend_ref = [settings.LEGEND_MAP[sftype] for sftype in map_config['feature_types']]
base_maps = [
{
'name': 'Esri_NatGeoWorldMap',
'url': 'http://server.arcgisonline.com/ArcGIS/rest/services/NatGeo_World_Map/'
'MapServer/tile/{z}/{y}/{x}',
'options': {
                'attribution': 'Tiles © Esri - National Geographic, Esri, DeLorme, '
                               'NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, '
                               'iPC',
'maxZoom': 16
},
'group': 'ESRI Basemaps'
},
{
'name': 'Esri_WorldImagery',
'url': 'http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/'
'MapServer/tile/{z}/{y}/{x}',
'options': {
                'attribution': 'Tiles © Esri - Source: Esri, i-cubed, USDA, USGS, '
                               'AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the '
                               'GIS User Community'
},
'group': 'ESRI Basemaps'
},
{
'name': 'Esri_WorldTopoMap',
'url': 'http://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/'
'MapServer/tile/{z}/{y}/{x}',
'options': {
                'attribution': 'Tiles © Esri - Esri, DeLorme, NAVTEQ, TomTom, '
                               'Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, '
                               'Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and '
                               'the GIS User Community'
},
'group': 'ESRI Basemaps'
},
{
'name': 'MapBox_RunBikeHike',
'url': 'https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?'
'access_token={accessToken}',
'options': {
'maxZoom': 20,
                'attribution': 'Map data © '
                               '<a href="http://openstreetmap.org">OpenStreetMap</a> '
                               'contributors, <a href="http://creativecommons.org/licenses/'
                               'by-sa/2.0/">CC-BY-SA</a>, Imagery © '
                               '<a href="http://mapbox.com">Mapbox</a>',
'id': 'mapbox.run-bike-hike',
'accessToken': map_config['MapBox']['access_token']
},
'group': 'OpenStreetMap Basemaps'
}
]
context = {
'prefixpath': settings.CUSTOM_TEMPLATE_PATH, 'legends': json.dumps(legend_ref),
'features': features,
'externalidentifiers': externalidentifiers,
'datasets': datasets, 'selecteddatasets': selected_ds, 'authenticated': authenticated,
'map_config': map_config,
'data_disclaimer': data_disclaimer, 'name': request.user,
'site_title': admin.site.site_title,
'site_header': admin.site.site_header, 'short_title': 'Map Locations',
'basemaps': base_maps, 'sf_types': sf_types, 'selectedterms': selected_type,
'selectedds': json.dumps(ds_selections), 'selectedtype': json.dumps(sftype_selections),
'urlpath': settings.BASE_URL
}
return render(request, 'mapdata.html', context)
def get_features(request, sf_type="all", ds_ids="all"):
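    """Return sampling features as JSON for the web map.

    sf_type and ds_ids select the filtering mode: 'all' returns every feature
    with a geometry, sf_type='filtered' filters by the comma-separated dataset
    ids in ds_ids, and ds_ids='filtered' filters by the comma-separated
    sampling feature types in sf_type. Each feature dict is augmented with
    admin URLs, site/specimen attributes, relationships, IGSNs, extension
    properties, and lat/lng.
    """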
if ds_ids == "all" or sf_type == "all":
features = Samplingfeatures.objects.exclude(featuregeometry__isnull=True)
elif sf_type == 'filtered':
dataset_ids = list(ds_ids.split(','))
datasetresults = Datasetsresults.objects.filter(datasetid__in=dataset_ids)
results = Results.objects.filter(resultid__in=datasetresults.values("resultid"))
fa = Featureactions.objects.filter(featureactionid__in=results.values("featureactionid"))
features1 = Samplingfeatures.objects.filter(
samplingfeatureid__in=fa.values("samplingfeatureid"))
relatedfeatures = Relatedfeatures.objects.filter(
samplingfeatureid__in=features1.values("samplingfeatureid"))
features2 = Samplingfeatures.objects.filter(
samplingfeatureid__in=relatedfeatures.values("relatedfeatureid"))
features = features1 | features2
    elif ds_ids == 'filtered':
        samplingfeature_types = list(sf_type.split(','))
        features = Samplingfeatures.objects.filter(sampling_feature_type__in=samplingfeature_types)
else:
features = []
feats = [model_to_dict(f) for f in features]
feats_filtered = list()
for feat in feats:
sf = Samplingfeatures.objects.get(samplingfeatureid=feat['samplingfeatureid'])
# Get url to sf
feat.update({
'samplingfeatureurl': 'odm2admin/samplingfeatures/{}/change/'.format(sf.samplingfeatureid),
'samplingfeaturetypeurl': sf.sampling_feature_type.sourcevocabularyuri
})
# Get Site Attr
if sf.sampling_feature_type.name == 'Site':
try:
site = Sites.objects.get(samplingfeatureid=sf.samplingfeatureid)
feat.update({
'sitetype': site.sitetypecv.name,
'sitetypeurl': site.sitetypecv.sourcevocabularyuri
})
except Sites.DoesNotExist:
site = None
# Get Specimen Attr
if sf.sampling_feature_type.name == 'Specimen':
try:
specimen = Specimens.objects.get(samplingfeatureid=sf.samplingfeatureid)
feat.update({
'specimentype': specimen.specimentypecv.name,
'specimentypeurl': specimen.specimentypecv.sourcevocabularyuri,
'specimenmedium': specimen.specimenmediumcv.name,
'specimenmediumurl': specimen.specimenmediumcv.sourcevocabularyuri,
})
except Specimens.DoesNotExist:
specimen = None
# Get Relations
relationship = get_relations(sf)
if all(value == [] for value in relationship.values()):
feat.update({
'relationships': None
})
else:
feat.update({
'relationships': relationship
})
# Get IGSN's
if Samplingfeatureexternalidentifiers.objects.filter(
samplingfeatureid=sf.samplingfeatureid).first() is not None:
igsn = sf.samplingfeatureexternalidentifiers_set.get()
feat.update({
'igsn': igsn.samplingfeatureexternalidentifier,
'igsnurl': igsn.samplingfeatureexternalidentifieruri
})
# Get Soil top and bottom depth
if Samplingfeatureextensionpropertyvalues.objects.filter(
samplingfeatureid=sf.samplingfeatureid).first() is not None:
sfep = sf.samplingfeatureextensionpropertyvalues_set.get_queryset()
if len(sfep) != 0:
for ep in sfep:
feat.update({
'{}'.format(ep.propertyid.propertyname): ep.propertyvalue,
'{}_units'.format(ep.propertyid.propertyname): ep.propertyid.propertyunitsid.unitsabbreviation,
})
# Get lat, lon
lat = sf.featuregeometrywkt().coords[1]
lon = sf.featuregeometrywkt().coords[0]
epsg = None
if sf.featuregeometrywkt().crs is not None:
epsg = sf.featuregeometrywkt().crs.srid
if lat != 0 and lon != 0:
feat['featuregeometry'] = {
'lat': lat,
'lng': lon,
'crs': epsg
}
feats_filtered.append(feat)
return HttpResponse(json.dumps(feats_filtered))
def truncate(f, n):
'''Truncates/pads a float f to n decimal places without rounding'''
s = '{}'.format(f)
if 'e' in s or 'E' in s:
return '{0:.{1}f}'.format(f, n)
i, p, d = s.partition('.')
return '.'.join([i, (d+'0'*n)[:n]])
def sensor_dashboard(request, feature_action='NotSet', sampling_feature='NotSet'):
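    """Render the sensor dashboard.

    Lists the feature actions configured in settings.SENSOR_DASHBOARD and
    relabels their result extension properties (start/end dates, value
    counts, up time, bound violations) for display.
    """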
authenticated = True
if not request.user.is_authenticated:
# return HttpResponseRedirect('../')
authenticated = False
ids = settings.SENSOR_DASHBOARD['featureactionids']
timeseriesdays = settings.SENSOR_DASHBOARD['time_series_days']
fas = None
allfas = None
if not feature_action == 'NotSet':
selected_featureactionid = int(feature_action)
# print(selected_featureactionid)
fas = Featureactions.objects.filter(featureactionid=selected_featureactionid
).order_by('-samplingfeatureid')
allfas = Featureactions.objects.filter(featureactionid__in=ids).order_by('-samplingfeatureid')
elif not sampling_feature == 'NotSet':
selected_featureactionid = 'NotSet'
samplingfeatureid = int(sampling_feature)
fas = Featureactions.objects.filter(samplingfeatureid=samplingfeatureid
).order_by('-samplingfeatureid')
allfas = Featureactions.objects.filter(featureactionid__in=ids).order_by('-samplingfeatureid')
else:
selected_featureactionid = 'NotSet'
# print(selected_featureactionid)
fas = Featureactions.objects.filter(featureactionid__in=ids).order_by('-samplingfeatureid')
allfas = fas
#samplingfeatures = Samplingfeatures.filter(samplingfeatureid__in=fas)
results = Results.objects.filter(featureactionid__in=fas)
tsrs = Timeseriesresults.objects.filter(resultid__in=results)
endDateProperty = Extensionproperties.objects.get(propertyname__icontains="end date")
#calculated_result_properties={}
#for tsr in tsrs:
#print(tsr)
repvs = Resultextensionpropertyvalues.objects.filter(resultid__in=results).order_by("resultid","propertyid")
dcount = 0
dmaxcount = 0
lastResult = None
for repv in repvs:
# print(repv.resultid)
if "start date" in str(repv.propertyid.propertyname):
startdate = repv.propertyvalue
repv.propertyname = "Time series began on: "
elif "end date" in str(repv.propertyid.propertyname):
enddate = repv.propertyvalue
# (enddate)
repv.propertyname = "most recent value on: "
elif "dashboard count" in str(repv.propertyid.propertyname):
dcount = repv.propertyvalue
repv.propertyname = "number of values recorded over last " + str(timeseriesdays) + " days"
elif "dashboard maximum count" in str(repv.propertyid.propertyname):
dmaxcount = repv.propertyvalue
# print(dcount)
# print(dmaxcount)
# repv.propertyname = str(dcount) + " of " + str(dmaxcount)
repv.propertyname = "up time"
if float(dmaxcount) > 0:
repv.propertyvalue = str(dcount) + " of " + str(dmaxcount) + \
" or " + str(truncate((float(dcount)/float(dmaxcount))*100, 2)) + "%"
# else:
# print("dmaxcount less then 0")
# print(repv)
# print(repv.resultid)
elif "dashboard below lower bound count" in str(repv.propertyid.propertyname):
repv.propertyname = "values below lower bound "
elif "dashboard above upper bound count" in str(repv.propertyid.propertyname):
repv.propertyname = "values above upper bound "
#dashboard last recorded value
elif "dashboard sensor active" in str(repv.propertyid.propertyname):
repv.propertyname = "sensor active "
elif "dashboard last recorded value" in str(repv.propertyid.propertyname):
repv.propertyname = "last recorded value "
elif "dashboard begin date" in str(repv.propertyid.propertyname):
repv.propertyname = "values above upper bound "
repv.propertyvalue = None
else:
repv.propertyname = repv.propertyid.propertyname
lastResult = repv.resultid
    return TemplateResponse(request,
                            'sensordashboard.html',
                            {'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
                             'featureactions': fas,
                             'allfeatureactions': allfas,
                             'results': results,
                             'feature_action': selected_featureactionid,
                             'authenticated': authenticated,
                             'resultextionproperties': repvs,
                             'short_title': 'Time Series'}, )
def get_relations(s):
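    """Return the parent, sibling, and child sampling features of s.

    Siblings share the first parent through an 'Is child of' relationship;
    each entry carries the external identifier URI, feature code, feature id,
    and external identifier.
    """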
pf = Relatedfeatures.objects.filter(samplingfeatureid_id=s.samplingfeatureid)
cf = Relatedfeatures.objects.filter(relatedfeatureid_id=s.samplingfeatureid)
sibsf = []
parents = []
children = []
if pf.first() is not None:
sib = Relatedfeatures.objects.filter(relationshiptypecv_id='Is child of',
relatedfeatureid_id=pf.first().relatedfeatureid_id). \
exclude(samplingfeatureid_id=s.samplingfeatureid)
if sib.first() is not None:
sibsf = list(Samplingfeatureexternalidentifiers.objects.\
filter(samplingfeatureid__in=sib.\
values_list('samplingfeatureid_id', flat=True)). \
values('samplingfeatureexternalidentifieruri',
'samplingfeatureid__samplingfeaturecode',
'samplingfeatureid__samplingfeatureid',
'samplingfeatureexternalidentifier'
))
parents = list(Samplingfeatureexternalidentifiers.objects.\
filter(samplingfeatureid__in=pf.\
values_list('relatedfeatureid_id',
flat=True)).\
values('samplingfeatureexternalidentifieruri',
'samplingfeatureid__samplingfeaturecode',
'samplingfeatureid__samplingfeatureid',
'samplingfeatureexternalidentifier'
))
if cf.first() is not None:
children = list(Samplingfeatureexternalidentifiers.objects.\
filter(samplingfeatureid__in=cf.\
values_list('samplingfeatureid_id', flat=True)). \
values('samplingfeatureexternalidentifieruri',
'samplingfeatureid__samplingfeaturecode',
'samplingfeatureid__samplingfeatureid',
'samplingfeatureexternalidentifier'
))
return {
'parents': parents,
'siblings': sibsf,
'children': children
}
def TimeSeriesGraphing(request, feature_action='All'):
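    """Render the interactive time series chart page.

    Filters results by feature action and related feature, defaults the date
    window to the 30 days before the selected result's recorded end date,
    builds scatter-series dictionaries for the chart template, and exports
    CSV when 'export_data' is posted.
    """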
authenticated = True
if not request.user.is_authenticated:
return HttpResponseRedirect('../')
template = loader.get_template('chart.html')
selected_relatedfeatid = None
selected_resultid = None
if feature_action == 'All':
selected_featureactionid = 1
result = Results.objects.filter(featureactionid=selected_featureactionid).first()
selected_resultid = result.resultid
selected_relatedfeatid = selected_resultid
else:
selected_featureactionid = int(feature_action)
# relatedfeatureList
# update_result_on_related_feature
done = False
selected_relatedfeatid, done, \
resultList, selected_resultid = relatedFeaturesFilter(request,
done,
selected_relatedfeatid,
selected_resultid,
feature_action)
if 'SelectedFeatureAction' in request.POST and not done:
# raise ValidationError(done)
if not request.POST['SelectedFeatureAction'] == 'All':
selected_featureactionid = int(request.POST['SelectedFeatureAction'])
resultList = Results.objects.filter(featureactionid=selected_featureactionid)
if 'update_result_list' in request.POST:
pass
else:
selected_featureactionid = request.POST['SelectedFeatureAction']
resultList = Results.objects.filter(result_type="Time series coverage")
elif not done:
resultList = Results.objects.filter(featureactionid=selected_featureactionid)
# find the measurement results series that where selected.
numresults = resultList.count()
selectedMResultSeries = []
for i in range(0, numresults):
selectionStr = str('selection' + str(i))
if selectionStr in request.POST:
# raise ValidationError(request.POST[selectionStr])
for result in resultList:
if int(request.POST[selectionStr]) == result.resultid:
selectedMResultSeries.append(int(request.POST[selectionStr]))
# if 'selection0' in request.POST:
# raise ValidationError(request.POST['selection0'] + ' '+
# request.POST['selection1'])
# selected_resultid = request.POST['selection0']
# else:
# selected_resultid = 15
# if no series were selected (like on first load) set the series to some value.
if len(resultList) > 0 and len(selectedMResultSeries) == 0:
selectedMResultSeries.append(int(resultList[0].resultid))
elif len(resultList) == 0 and len(selectedMResultSeries) == 0:
selectedMResultSeries.append(15)
EndDateProperty = Extensionproperties.objects.get(propertyname__icontains="end date")
if 'startDate' in request.POST:
entered_start_date = request.POST['startDate']
else:
# entered_start_date = "2016-01-01"
recordedenddate = Resultextensionpropertyvalues.objects.\
filter(resultid=selected_resultid).filter(propertyid=EndDateProperty.propertyid).get()
end_date = recordedenddate.propertyvalue
enddt = time.strptime(end_date, "%Y-%m-%d %H:%M:%S.%f")
dt = datetime.fromtimestamp(mktime(enddt))
last_day_previous_month = dt - timedelta(days=30)
entered_start_date = last_day_previous_month.strftime('%Y-%m-%d %H:%M')
        # print(entered_start_date)
if 'endDate' in request.POST:
entered_end_date = request.POST['endDate']
else:
recordedenddate = Resultextensionpropertyvalues.objects.\
filter(resultid=selected_resultid).filter(propertyid=EndDateProperty.propertyid).get()
entered_end_date = recordedenddate.propertyvalue
if entered_end_date == '':
recordedenddate = Resultextensionpropertyvalues.objects.\
filter(resultid=selected_resultid).filter(propertyid=EndDateProperty.propertyid).get()
entered_end_date = recordedenddate.propertyvalue
if entered_start_date == '':
recordedenddate = Resultextensionpropertyvalues.objects.\
filter(resultid=selected_resultid).filter(propertyid=EndDateProperty.propertyid).get()
end_date = recordedenddate.propertyvalue
enddt = time.strptime(end_date, "%Y-%m-%d %H:%M:%S.%f")
dt = datetime.fromtimestamp(mktime(enddt))
last_day_previous_month = dt - timedelta(days=30)
entered_start_date = last_day_previous_month.strftime('%Y-%m-%d %H:%M')
# entered_start_date = "2016-01-01"
selected_results = []
name_of_sampling_features = []
name_of_variables = []
name_of_units = []
myresultSeries = []
i = 0
data = {}
for selectedMResult in selectedMResultSeries:
i += 1
selected_result = Results.objects.filter(resultid=selectedMResult).get()
selected_results.append(selected_result)
# name_of_sampling_features.append(get_name_of_sampling_feature(selected_result))
tmpname = get_name_of_sampling_feature(selected_result)
name_of_sampling_features.append(tmpname)
tmpname = get_name_of_variable(selected_result)
        if len(name_of_variables) > 0:
namefound = False
for name in name_of_variables:
if name == tmpname:
namefound = True
if not namefound:
name_of_variables.append(tmpname)
else:
name_of_variables.append('')
else:
name_of_variables.append(tmpname)
tmpname = get_name_of_units(selected_result)
        if len(name_of_units) > 0:
namefound = False
for name in name_of_units:
if name == tmpname:
namefound = True
if not namefound:
name_of_units.append(tmpname)
else:
name_of_units.append('')
else:
name_of_units.append(tmpname)
myresultSeries.append(Timeseriesresultvalues.objects.all().filter(
~Q(datavalue__lte=-6999)).filter(
valuedatetime__gt=entered_start_date).filter(
valuedatetime__lt=entered_end_date).filter(
resultid=selectedMResult).order_by('-valuedatetime'))
data.update({'datavalue' + str(i): []})
myresultSeriesExport = Timeseriesresultvalues.objects.all() \
.filter(valuedatetime__gt=entered_start_date) \
.filter(valuedatetime__lt=entered_end_date) \
.filter(resultid__in=selectedMResultSeries).order_by('-valuedatetime')
i = 0
for myresults in myresultSeries:
i += 1
for result in myresults:
start = datetime(1970, 1, 1)
delta = result.valuedatetime - start
mills = delta.total_seconds() * 1000
if math.isnan(result.datavalue):
dataval = 'null'
else:
dataval = result.datavalue
data['datavalue' + str(i)].append(
[mills, dataval])
# data['datavalue' + str(i)].append([mills, result.datavalue])
# #dumptoMillis(result.valuedatetime)
# data['datavalue'].extend(tmplist )
# data['valuedatetime'].append(dumptoMillis(result.valuedatetime))
# build strings for graph labels
i = 0
seriesStr = ''
series = []
titleStr = ''
tmpUnit = ''
tmpVariableName = ''
tmpLocName = ''
for name_of_unit, name_of_sampling_feature, name_of_variable in zip(name_of_units,
name_of_sampling_features,
name_of_variables):
i += 1
if i == 1 and not name_of_unit == '':
seriesStr += name_of_unit
elif not name_of_unit == '':
tmpUnit = name_of_unit
seriesStr += ' - ' + name_of_unit
if not name_of_variable == '':
tmpVariableName = name_of_variable
if not name_of_unit == '':
tmpUnit = name_of_unit
if not name_of_sampling_feature == '':
tmpLocName = name_of_sampling_feature
series.append(
{"name": tmpUnit + ' - ' + tmpVariableName + ' - ' + tmpLocName, "yAxis": tmpUnit,
"data": data['datavalue' + str(i)]})
i = 0
name_of_sampling_features = set(name_of_sampling_features)
for name_of_sampling_feature in name_of_sampling_features:
i += 1
if i == 1:
titleStr += name_of_sampling_feature # + ', ' +name_of_variable
else:
titleStr += ' - ' + name_of_sampling_feature # +name_of_variable+ ', '
chartID = 'chart_id'
chart = {"renderTo": chartID, "type": 'scatter', "zoomType": 'xy'}
title2 = {"text": titleStr}
xAxis = {"type": 'datetime', "title": {"text": 'Date'}}
yAxis = {"title": {"text": seriesStr}}
graphType = 'scatter'
actionList = Actions.objects.filter(
action_type="Observation") # where the action is not of type estimation
# assuming an estimate is a single value.
featureactionList = Featureactions.objects.filter(action__in=actionList)
relatedFeatureList = Relatedfeatures.objects.distinct(
'relatedfeatureid') # .order_by('relatedfeatureid')
int_selectedresultid_ids = []
for int_selectedresultid in selectedMResultSeries:
int_selectedresultid_ids.append(int(int_selectedresultid))
csvexport = False
# if the user hit the export csv button export the measurement results to csv
if 'export_data' in request.POST:
# if request.get('export_data'):
response = exportspreadsheet(request, myresultSeriesExport, False)
csvexport = True
# k=0
# myfile = StringIO.StringIO()
# for myresults in myresultSeriesExport:
# for result in myresults:
# if k==0:
# myfile.write(result.csvheader())
# myfile.write('\n')
# myfile.write(result.csvoutput())
# myfile.write('\n')
# k+=1
# response = HttpResponse(myfile.getvalue(),content_type='text/csv')
# response['Content-Disposition'] = 'attachment; filename="mydata.csv"'
if csvexport:
return response
else:
# raise ValidationError(relatedFeatureList)
return TemplateResponse(request,
template,
{'featureactionList': featureactionList,
'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
'data_disclaimer': settings.DATA_DISCLAIMER,
'resultList': resultList,
'startDate': entered_start_date,
'endDate': entered_end_date,
'SelectedResults': int_selectedresultid_ids,
'authenticated': authenticated,
'chartID': chartID, 'chart': chart,
'series': series, 'title2': title2,
'graphType': graphType, 'xAxis': xAxis,
'yAxis': yAxis, 'name_of_units': name_of_units,
'relatedFeatureList': relatedFeatureList,
'SelectedRelatedFeature': selected_relatedfeatid,
'SelectedFeatureAction': selected_featureactionid,
'name': request.user,
'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': 'Time Series'}, )
def groupResultsByVariable(sampling_feature):
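    """Group result ids for a sampling feature by variable name, units, and
    processing level, keeping only the groups with more than one result."""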
fas = Featureactions.objects.filter(samplingfeatureid=sampling_feature)
results = Results.objects.filter(featureactionid__in=fas).filter(
processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display']
)
groupedResults = {}
for result in results:
# print('id: ' + str(result.featureactionid.featureactionid) +' '+ str(result.featureactionid))
#if str(result.variableid.variable_name) == 'Water temperature':
# print('var code: ' + str(result.variableid.variable_name) + ' var id ' + str(
# result.variableid.variableid) + ' unit_type: ' + str(result.unitsid.unit_type) +
# ' processing level: ' + str(result.processing_level) + ' id: ' + str(result.resultid))
seriesname = str(result.variableid.variable_name) + '; units: ' + str(result.unitsid.unitsabbreviation) +\
'; ' + str(result.processing_level)
if str(seriesname) in groupedResults:
groupedResults[str(seriesname)].append(result.resultid)
else:
groupedResults[str(seriesname)] = [result.resultid]
# print('grouped results')
    # drop any variable/units/processing-level group that has only a single
    # result, since a lone series needs no grouping
    deletemes = []
    for groupedResult in groupedResults:
        if len(groupedResults[groupedResult]) == 1:
            deletemes.append(groupedResult)
    for deleteme in deletemes:
        groupedResults.pop(deleteme)
return groupedResults
def mappopuploader(request, feature_action='NotSet', samplingfeature='NotSet', dataset='NotSet',
resultidu='NotSet',
startdate='NotSet', enddate='NotSet', popup='NotSet'):
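    """Render the chart popup (or the chart page when popup is 'NotSet') for
    a feature action, sampling feature, or dataset.

    The default date range comes from the start/end date extension
    properties, falling back to the time series values themselves; when no
    dates can be derived, only methods are shown (methodsOnly).
    """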
# print("HERE")
if not request.user.is_authenticated:
# return HttpResponseRedirect('../')
authenticated = False
else:
authenticated = True
if popup == 'NotSet':
template = loader.get_template('chart2.html')
else:
template = loader.get_template('chartpopup.html')
data_disclaimer = settings.DATA_DISCLAIMER
useDataset = False
useSamplingFeature = False
if dataset == 'NotSet':
if samplingfeature == 'NotSet':
feature_action = int(feature_action)
else:
samplingfeature = int(samplingfeature)
useSamplingFeature = True
else:
useDataset = True
dataset = int(dataset)
if resultidu != 'NotSet':
pass
featureActionLocation = None
featureActionMethod = None
datasetTitle = None
featureActions = None
datasetAbstract = None
methods = None
methodsOnly = 'False'
    samplingfeatureid = None
resultListGrouped = None
try:
if not useDataset:
if useSamplingFeature:
samplefeature = Samplingfeatures.objects.\
filter(samplingfeatureid=samplingfeature).get()
samplingfeatureid = samplefeature.samplingfeatureid
featureActions = Featureactions.objects.\
filter(samplingfeatureid=samplefeature).\
order_by("action__method")
resultList = Results.objects.filter(featureactionid__in=featureActions
).order_by("featureactionid__action__method")
#.filter(
# processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display']
#)
actions = Actions.objects.filter(actionid__in=featureActions.values("action"))
methods = Methods.objects.filter(methodid__in=actions.values("method"))
featureActionLocation = samplefeature.samplingfeaturename
resultListGrouped = groupResultsByVariable(samplefeature)
# print(resultListGrouped)
else:
resultList = Results.objects.filter(featureactionid=feature_action
).order_by("featureactionid__action__method")
# .filter(
# processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display'])
featureActions = Featureactions.objects.filter(featureactionid=feature_action).get()
featureActionLocation = featureActions.samplingfeatureid.samplingfeaturename
samplingfeatureid = featureActions.samplingfeatureid.samplingfeatureid
featureActionMethod = featureActions.action.method.methodname
actions = Actions.objects.filter(actionid=featureActions.action.actionid).get()
methods = Methods.objects.filter(methodid=actions.method.methodid)
resultListGrouped = groupResultsByVariable(samplingfeatureid)
else:
datasetResults = Datasetsresults.objects.filter(datasetid=dataset)
resultList = Results.objects.filter(resultid__in=datasetResults.values(
"resultid")).order_by("featureactionid__action__method") #.filter(
# processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display']
#)
datasetTitle = Datasets.objects.filter(datasetid=dataset).get().datasettitle
datasetAbstract = Datasets.objects.filter(datasetid=dataset).get().datasetabstract
    except ObjectDoesNotExist:
html = "<html><body>No Data Available Yet.</body></html>"
return HttpResponse(html)
try:
StartDateProperty = Extensionproperties.objects.get(propertyname__icontains="start date")
EndDateProperty = Extensionproperties.objects.get(propertyname__icontains="end date")
startdates = Resultextensionpropertyvalues.objects.\
filter(resultid__in=resultList.values("resultid")).filter(propertyid=StartDateProperty)
enddates = Resultextensionpropertyvalues.objects.\
filter(resultid__in=resultList.values("resultid")).filter(propertyid=EndDateProperty)
realstartdates = []
realenddates = []
for startdate in startdates:
if len(startdate.propertyvalue) == 16:
realstartdates.append(datetime.strptime(startdate.propertyvalue, "%Y-%m-%d %H:%M"))
elif len(startdate.propertyvalue) == 19:
realstartdates.append(datetime.strptime(startdate.propertyvalue, "%Y-%m-%d %H:%M:%S"))
else:
realstartdates.append(datetime.strptime(startdate.propertyvalue, "%Y-%m-%d %H:%M:%S.%f"))
for enddate in enddates:
if len(enddate.propertyvalue) == 16:
realenddates.append(datetime.strptime(enddate.propertyvalue, "%Y-%m-%d %H:%M")) #%Y-%m-%d %H:%M
elif len(enddate.propertyvalue) == 19:
realenddates.append(datetime.strptime(enddate.propertyvalue, "%Y-%m-%d %H:%M:%S"))
else:
realenddates.append(datetime.strptime(enddate.propertyvalue, "%Y-%m-%d %H:%M:%S.%f"))
startdate = min(realstartdates).strftime('%Y-%m-%d %H:%M')
enddate = max(realenddates).strftime('%Y-%m-%d %H:%M')
    except ObjectDoesNotExist:
try:
startdate = Timeseriesresultvalues.objects.\
filter(resultid__in=resultList.values("resultid")).\
annotate(Min('valuedatetime')).\
order_by('valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
enddate = Timeseriesresultvalues.objects.\
filter(resultid__in=resultList.values("resultid")).\
annotate(Max('valuedatetime')).\
order_by('-valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
except IndexError as e:
# html = "<html><body>No Data Available Yet.</body></html>"
# return HttpResponse(html)
try:
startdate = Timeseriesresultvalues.objects.\
filter(resultid__in=resultList.values("resultid")).\
annotate(Min('valuedatetime')).\
order_by('valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
enddate = Timeseriesresultvalues.objects.\
filter(resultid__in=resultList.values("resultid")).\
annotate(Max('valuedatetime')).\
order_by('-valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
methodsOnly = 'True'
except IndexError as e:
html = "<html><body>No time series data available for this site.</body></html>"
return HttpResponse(html)
except ValueError as e:
# html = "<html><body>No Data Available Yet.</body></html>"
# return HttpResponse(html)
methodsOnly = 'True'
    for result in resultList:
        try:
            tsr = Timeseriesresults.objects.filter(resultid=result).get()
            result.timeintervalunits = tsr.intendedtimespacingunitsid
            result.timeinterval = tsr.intendedtimespacing
        except ObjectDoesNotExist:
            # not every result has a matching time series result record
            pass
processing_level__in = settings.MAP_CONFIG['result_value_processing_levels_to_display']
return TemplateResponse(request, template, {'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
'useSamplingFeature': useSamplingFeature,
'methodsOnly': methodsOnly,
'featureActions': featureActions,
'featureActionMethod': featureActionMethod,
'featureActionLocation': featureActionLocation,
'data_disclaimer': data_disclaimer,
'datasetTitle': datasetTitle,
'samplingfeatureid': samplingfeatureid,
'datasetAbstract': datasetAbstract,
'useDataset': useDataset, 'startDate': startdate,
'endDate': enddate,
'processing_level__in': processing_level__in,
'authenticated': authenticated, 'methods': methods,
'resultList': resultList,
'resultListGrouped': resultListGrouped}, )
def is_number(s):
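    """Return True if s can be parsed as a float."""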
try:
float(s)
return True
except ValueError:
return False
def precision_and_scale(x):
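    """Return (precision, scale) for a numeric value, capped at 14
    significant digits."""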
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
    while frac_digits % 10 == 0:
        frac_digits //= 10  # integer division keeps frac_digits an int in Python 3
    scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
def add_shiftvalues(request):
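    """Apply a linear drift correction to the posted time series values.

    The difference between the posted 'shift' target and the current last
    value is distributed proportionally across the selected values (in the
    posted 'direction'), so the series ends on the target. 'shiftvals[]' is
    a flattened list of (date, value, valueid) triples; only the value ids
    are used.
    """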
    shift = None
error = None
resultid = None
shiftvals = None
lastshiftval = None
firstshiftval = None
realshiftvals = []
response_data = {}
forwardshift = True
if 'direction' in request.POST:
if request.POST['direction'] == 'backward':
forwardshift = False
if 'shift' in request.POST:
shift = Decimal(request.POST['shift'])
# print(offset)
if 'shiftvals[]' in request.POST:
shiftvals = request.POST.getlist('shiftvals[]')
# print(annotationvals)
if 'resultidu[]' in request.POST:
resultid = request.POST.getlist('resultidu[]')
# print('resultid: ' + str(resultid))
for rid in resultid:
intrid = int(rid)
# print('result id')
# print(rid)
# firstdate = shiftvals[0]
# lastdate = shiftvals[-2]
idvals = []
i=0
for offsetval in shiftvals:
# print(offsetval)
# if i % 3 == 0:
# datevals.append(datetime.strptime(offsetval, '%Y-%m-%d %H:%M:%S'))
if i % 3 == 2:
idvals.append(int(offsetval))
i += 1
        if forwardshift:
            order = 'valuedatetime'
        else:
            order = '-valuedatetime'
        # filter() never raises ObjectDoesNotExist, so test for an empty
        # queryset explicitly instead of wrapping the query in try/except
        realshiftvals = Timeseriesresultvalues.objects.filter(
            resultid=rid).filter(valueid__in=idvals).order_by(order)
        if not realshiftvals.exists():
            response_data['error'] = 'no values found'
            continue
valcount = realshiftvals.count()
precision, scale = precision_and_scale(realshiftvals.last().datavalue)
getcontext().prec = precision
normshift = shift - Decimal(realshiftvals.last().datavalue)
shiftval = normshift / valcount
k = 1
        for tsrv in realshiftvals:
            if k > 1:  # the first value in the series is left unshifted
                tsrv.datavalue = float(Decimal(tsrv.datavalue) + shiftval * k)
                tsrv.save()
                # print(tsrv.datavalue)
            k += 1
    return HttpResponse(json.dumps(response_data), content_type='application/json')
def add_offset(request):
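    """Add the posted constant 'offset' to the selected time series values.

    'offsetvals[]' is a flattened list of (date, value, valueid) triples;
    only the value ids are used. Responds with the number of values changed.
    """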
    offset = None
error = None
resultid = None
offsetvals = None
response_data = {}
if 'offset' in request.POST:
offset = Decimal(request.POST['offset'])
# print('offset')
# print(offset)
if 'offsetvals[]' in request.POST:
# THESE VALUES ARE NOT ORDERED CORRECTLY
offsetvals = request.POST.getlist('offsetvals[]')
# print(offsetvals)
if 'resultidu[]' in request.POST:
resultid = request.POST.getlist('resultidu[]')
# print('resultid: ' + str(resultid))
valcount = 0
i=0
# datevals = []
idvals = []
for offsetval in offsetvals:
# print(offsetval)
# if i % 3 == 0:
# datevals.append(datetime.strptime(offsetval, '%Y-%m-%d %H:%M:%S'))
if i % 3 == 2:
idvals.append(int(offsetval))
i+=1
# datevals = sorted(datevals)
# print(datevals)
    for rid in resultid:
        tsrvs = Timeseriesresultvalues.objects.filter(resultid=rid).filter(valueid__in=idvals)
        # print(tsrvs.query)
        for tsrv in tsrvs:
            tsrv.datavalue = Decimal(tsrv.datavalue) + offset
            tsrv.save()
            valcount += 1  # track how many values were changed for the response
response_data['valuesadded'] = valcount
    return HttpResponse(json.dumps(response_data), content_type='application/json')
def add_annotation(request):
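    """Annotate the posted time series values.

    Optionally sets the values to NaN (recording the original value in the
    annotation text) and/or applies a quality code CV term; creates or
    updates one Annotations record per value through
    Timeseriesresultvalueannotations.
    """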
# print('annotate')
resultid = None
annotationvals = None
annotation = None
setNaNstr = None
setNaN = False
cvqualitycode = False
response_data = {}
annotationobj = None
anno = None
if 'resultidu[]' in request.POST:
resultid = request.POST.getlist('resultidu[]')
# print(resultid)
    annotationFromUser = ''  # default so the branches below cannot raise a NameError
    if 'annotation' in request.POST:
        annotationFromUser = str(request.POST['annotation'])
        response_data['annotation'] = annotationFromUser
    # print(annotation)
# annotationtype
if 'cvqualitycode' in request.POST:
cvqualitycode = str(request.POST['cvqualitycode'])
# print(cvqualitycode)
if cvqualitycode == 'Select':
cvqualitycode = False
if 'setNaN' in request.POST:
setNaNstr = str(request.POST['setNaN'])
if setNaNstr == 'false':
setNaN = False
if setNaNstr == 'true':
setNaN = True
# print(setNaN)
if 'annotationvals[]' in request.POST:
annotationvals = request.POST.getlist('annotationvals[]')
# print(annotationvals)
annotationtype = CvAnnotationtype.objects.get(name='Time series result value annotation')
if cvqualitycode:
qualitycode = CvQualitycode.objects.get(name=cvqualitycode)
# annotator = People.objects.filter(personfirstname='Miguel').filter(personlastname='Leon')
lastannotationval = None
for rid in resultid:
intrid = int(rid)
# print('result id')
# print(rid)
idvals = []
i = 0
for annotationval in annotationvals:
# print(offsetval)
# if i % 3 == 0:
# datevals.append(datetime.strptime(offsetval, '%Y-%m-%d %H:%M:%S'))
if i % 3 == 2:
idvals.append(int(annotationval))
i += 1
# firstdate = annotationvals[0]
# lastdate = annotationvals[-2]
# print(firstdate)
# print(lastdate)
# tsrvs = Timeseriesresultvalues.objects.filter(resultid=rid).filter(valuedatetime__gte=firstdate).filter(
# valuedatetime__lte=lastdate).filter(datavalue__in=valstochange)
tsrvs = Timeseriesresultvalues.objects.filter(resultid=rid).filter(valueid__in=idvals)
for tsrv in tsrvs:
# print(tsrv.datavalue)
# print(tsrv.valuedatetime)
if setNaN:
annotation = annotationFromUser + ' original value was ' + str(tsrv.datavalue)
# print(annotation)
if len(annotation) > 499:
annotation = annotation[:499]
try:
tsrvanno = Timeseriesresultvalueannotations.objects.filter(valueid=tsrv).get()
annotationobj = Annotations.objects.filter(annotationid=tsrvanno.annotationid.annotationid).get()
annotationobj.annotationtypecv = annotationtype
annotationobj.annotationcode = ''
annotationobj.annotationtext = annotation
annotationobj.annotationdatetime = datetime.now()
annotationobj.annotationutcoffset = 4
annotationobj.save()
except ObjectDoesNotExist:
# print('error')
#if not annotationobj:
# print('annotation does not exist')
annotationobj = Annotations(annotationtypecv=annotationtype, annotationcode='',
annotationtext=annotation, annotationdatetime=datetime.now(),
annotationutcoffset=4)
annotationobj.save()
tsrvanno = Timeseriesresultvalueannotations(valueid=tsrv,
annotationid=annotationobj)
tsrvanno.save()
# print(annotationobj)
# print(annotation)
# annotationobj.save()
tsrv.datavalue = float('nan')
if cvqualitycode:
tsrv.qualitycodecv = qualitycode
tsrv.save(force_update=True)
# print(tsrv)
elif cvqualitycode:
tsrv.qualitycodecv = qualitycode
tsrv.save(force_update=True)
if not setNaN:
annotation = annotationFromUser
try:
tsrvanno = Timeseriesresultvalueannotations.objects.filter(valueid=tsrv).get()
anno = Annotations.objects.filter(annotationid=tsrvanno.annotationid.annotationid).get()
anno.annotationtypecv = annotationtype
anno.annotationcode = ''
anno.annotationtext = annotation
anno.annotationdatetime = datetime.now()
anno.annotationutcoffset = 4
anno.save()
except ObjectDoesNotExist:
# print('error')
# if not annotationobj:
#print('annotation does not exist')
annotationobj = Annotations(annotationtypecv=annotationtype, annotationcode='',
annotationtext=annotation, annotationdatetime=datetime.now(),
annotationutcoffset=4)
annotationobj.save()
tsrvanno = Timeseriesresultvalueannotations(valueid=tsrv,
annotationid=annotationobj)
tsrvanno.save()
if cvqualitycode:
tsrv.qualitycodecv = qualitycode
tsrv.save(force_update=True)
# try:
# tsrvanno = Timeseriesresultvalueannotations.objects.filter(valueid=tsrv).get()
# tsrvanno.annotationid = annotationobj
# except ObjectDoesNotExist:
# tsrvanno = Timeseriesresultvalueannotations(valueid=tsrv,
# annotationid=annotationobj)
# tsrvanno.save()
# print(tsrvanno)
# tsrvanno.save()
# print(tsrvanno.valueid)
# lastannotationval = annotationval
# if resultidu != 'NotSet':
# resultidu = int(resultidu)
return HttpResponse(json.dumps(response_data),content_type='application/json')
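# Expected POST payload for add_annotation, as read above:
#   resultidu[]       -- ids of the results to annotate
#   annotation        -- free annotation text (truncated to 499 characters
#                        once the original value is appended)
#   cvqualitycode     -- optional CvQualitycode name ('Select' means none)
#   setNaN            -- 'true' replaces each data value with NaN and records
#                        the original value in the annotation text
#   annotationvals[]  -- flat (datetime, datavalue, valueid) triplets; only
#                        the valueid (every third entry) is used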
# def on_raw_message(body):
# print(body)
def preProcDataLoggerFile(request):
response_data = {}
formData = None
dataloggerfileid = None
processingCode = None
databeginson = None
columnheaderson = None
check_dates = False
# print('in view')
# print(request.POST)
if 'dataloggerfileid' in request.POST:
dataloggerfileid = int(request.POST['dataloggerfileid'])
# print(dataloggerfileid)
if 'processingCode' in request.POST:
processingCode = request.POST['processingCode']
# print(processingCode)
if 'databeginson' in request.POST:
databeginson = int(request.POST['databeginson'])
# print(databeginson)
if 'columnheaderson' in request.POST:
columnheaderson = int(request.POST['columnheaderson'])
# print(columnheaderson)
if 'check_dates' in request.POST:
if request.POST['check_dates'] == 'True':
check_dates = True
dlf = Dataloggerfiles.objects.get(dataloggerfileid=dataloggerfileid)
pdlf = ProcessDataloggerfile.objects.get(dataloggerfileid=dataloggerfileid)
linkname = str(dlf.dataloggerfilelinkname())
fileid = dlf.dataloggerfileid
out = StringIO()
try:
management.call_command('validate_datalogger_file', linkname, str(fileid)
, str(databeginson), str(columnheaderson), stdout=out)
# messages = 'complete '
messages = out.getvalue()
response_data['validatemessage'] = str(messages) # e.with_traceback()
response = HttpResponse(json.dumps(response_data), content_type='application/json')
except CommandError as e:
response_data['error_message'] = str(e) #e.with_traceback()
response = HttpResponse(json.dumps(response_data), content_type='application/json')
response.status_code = 400
return response
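# preProcDataLoggerFile captures the validation command's report by passing a
# StringIO as stdout. A minimal standalone sketch of that capture pattern,
# mirroring the call above (the helper name is illustrative):
def _run_validation_sketch(linkname, fileid, databeginson, columnheaderson):
    """Run the validation command and return whatever it printed."""
    from io import StringIO
    from django.core import management
    out = StringIO()
    management.call_command('validate_datalogger_file', linkname, str(fileid),
                            str(databeginson), str(columnheaderson), stdout=out)
    return out.getvalue()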
# @shared_task
def procDataLoggerFile(request):
response_data = {}
formData = None
dataloggerfileid = None
processingCode = None
databeginson = None
columnheaderson = None
check_dates=False
# print('in view')
# print(request.POST)
if 'dataloggerfileid' in request.POST:
dataloggerfileid = int(request.POST['dataloggerfileid'])
# print(dataloggerfileid)
if 'processingCode' in request.POST:
processingCode = request.POST['processingCode']
# print(processingCode)
if 'databeginson' in request.POST:
databeginson = int(request.POST['databeginson'])
# print(databeginson)
if 'columnheaderson' in request.POST:
columnheaderson = int(request.POST['columnheaderson'])
# print(columnheaderson)
if 'check_dates' in request.POST:
if request.POST['check_dates'] =='True':
check_dates = True
# print(check_dates)
# print(dataloggerfileid)
dlf = Dataloggerfiles.objects.get(dataloggerfileid=dataloggerfileid)
pdlf = ProcessDataloggerfile.objects.get(dataloggerfileid=dataloggerfileid)
linkname = str(dlf.dataloggerfilelinkname())
fileid = dlf.dataloggerfileid
ftpfile = dlf.dataloggerfiledescription
ftpparse = urlparse(ftpfile)
response = None
try:
        if pdlf.processingCode not in ('locked', 'done'):
# pdlf.processingCode = 'locked'
# pdlf.save()
if len(ftpparse.netloc) > 0:
ftpfrequencyhours = 24 # re.findall(r'^\D*(\d+)', self.processingCode)[0]
management.call_command('update_preprocess_process_datalogger_file', linkname, str(fileid)
, str(databeginson), str(columnheaderson),
str(ftpfrequencyhours), False)
else:
# print('processdataloggerfile')
# result = tasks.pdataloggerfile.apply_async((linkname,fileid,databeginson,columnheaderson,check_dates,False))
management.call_command('ProcessDataLoggerFile', linkname ,str(fileid)
, str(databeginson), str(columnheaderson),
check_dates, False, False)
# print(result)
pdlf.processingCode = 'done'
pdlf.save()
response = HttpResponse(json.dumps(response_data), content_type='application/json')
except CommandError as e:
response_data['error_message'] = str(e) #e.with_traceback()
response = HttpResponse(json.dumps(response_data), content_type='application/json')
response.status_code = 400
#response_data['formData'] = formData
return response
def addL1timeseries(request):
resultid = None
response_data = {}
createorupdateL1 = None
pl1 = Processinglevels.objects.get(processinglevelid=2)
pl0 = Processinglevels.objects.get(processinglevelid=1)
valuesadded = 0
tsresultTocopyBulk = []
if 'createorupdateL1' in request.POST:
createorupdateL1 = str(request.POST['createorupdateL1'])
if 'resultidu[]' in request.POST:
resultid = request.POST.getlist('resultidu[]')
for result in resultid:
if createorupdateL1 == "create":
#print('create')
resultTocopy = Results.objects.get(resultid=result)
tsresultTocopy = Timeseriesresults.objects.get(resultid=result)
resultTocopy.resultid = None
resultTocopy.processing_level = pl1
resultTocopy.save()
tsrvToCopy = Timeseriesresultvalues.objects.filter(resultid=tsresultTocopy)
tsresultTocopy.resultid = resultTocopy
tsresultTocopy.save()
newresult = tsresultTocopy.resultid
# tsrvToCopy.update(resultid=tsresultTocopy)
for tsrv in tsrvToCopy:
tsrv.resultid = tsresultTocopy
try:
tsrva = Timeseriesresultvalueannotations.objects.get(valueid = tsrv.valueid)
tsrv.valueid = None
tsrv.save()
tsrva.valueid = tsrv
# print(tsrv.valueid)
tsrva.save()
except ObjectDoesNotExist:
tsrv.valueid = None
tsresultTocopyBulk.append(tsrv)
            newtsrv = Timeseriesresultvalues.objects.bulk_create(tsresultTocopyBulk)
            tsresultTocopyBulk = []  # reset so the next selected result does not re-insert these rows
elif createorupdateL1 == "update":
print('update')
tsresultL1 = Timeseriesresults.objects.get(resultid=result)
resultL1 = Results.objects.get(resultid=result)
# tsrvL1 = Timeseriesresultvalues.objects.filter(resultid=tsresultL1)
tsrvAddToL1Bulk = []
relatedL0result = Results.objects.filter(
featureactionid = resultL1.featureactionid).filter(
variableid = resultL1.variableid
).filter(unitsid = resultL1.unitsid).filter(
processing_level=pl0)
# newresult = relatedL0result.resultid
relateL0tsresults = Timeseriesresults.objects.filter(resultid__in= relatedL0result)
relateL0tsresult = None
for L0result in relateL0tsresults:
if L0result.intendedtimespacing == tsresultL1.intendedtimespacing and L0result.intendedtimespacingunitsid == tsresultL1.intendedtimespacingunitsid:
relateL0tsresult =L0result
tsrvL0 = Timeseriesresultvalues.objects.filter(resultid=relateL0tsresult)
# print(relateL0tsresult)
# maxtsrvL1=Timeseriesresultvalues.objects.filter(resultid=relateL1tsresult).annotate(
# Max('valuedatetime')). \
# order_by('-valuedatetime')
# print(relateL1tsresult)
# for r in maxtsrvL1:
# print(r)
print('L1 result')
print(tsresultL1)
maxtsrvL0=Timeseriesresultvalues.objects.filter(resultid=relateL0tsresult).annotate(
Max('valuedatetime')). \
order_by('-valuedatetime')[0].valuedatetime
maxtsrvL1=Timeseriesresultvalues.objects.filter(resultid=tsresultL1).annotate(
Max('valuedatetime')). \
order_by('-valuedatetime')[0].valuedatetime
mintsrvL0=Timeseriesresultvalues.objects.filter(resultid=relateL0tsresult).annotate(
Min('valuedatetime')). \
order_by('valuedatetime')[0].valuedatetime
mintsrvL1=Timeseriesresultvalues.objects.filter(resultid=tsresultL1).annotate(
Min('valuedatetime')). \
order_by('valuedatetime')[0].valuedatetime
# print('max L0')
# print(maxtsrvL0)
# print('max L1')
# print(maxtsrvL1)
if maxtsrvL1 < maxtsrvL0:
tsrvAddToL1 = tsrvL0.filter(valuedatetime__gt=maxtsrvL1)
for tsrv in tsrvAddToL1:
tsrv.resultid = tsresultL1
try:
tsrva = Timeseriesresultvalueannotations.objects.get(valueid = tsrv.valueid)
tsrv.valueid = None
tsrv.save()
tsrva.valueid = tsrv
# print(tsrv.valueid)
tsrva.save()
except ObjectDoesNotExist:
# print('doesnt exist')
tsrv.valueid = None
tsresultTocopyBulk.append(tsrv)
if mintsrvL1 > mintsrvL0:
tsrvAddToL1 = tsrvL0.filter(valuedatetime__lt=mintsrvL1)
for tsrv in tsrvAddToL1:
print(tsresultL1)
tsrv.resultid = tsresultL1
try:
tsrva = Timeseriesresultvalueannotations.objects.get(valueid = tsrv.valueid)
tsrv.valueid = None
tsrv.save()
tsrva.valueid = tsrv
# print(tsrv.valueid)
tsrva.save()
except ObjectDoesNotExist:
tsrv.valueid = None
tsresultTocopyBulk.append(tsrv)
newtsrv = Timeseriesresultvalues.objects.bulk_create(tsresultTocopyBulk)
            valuesadded = len(newtsrv)
print(valuesadded)
# for tsrv in newtsrv:
# print(tsrv.resultid.resultid)
# print(tsrv)
response_data['valuesadded'] = valuesadded
# response_data['newresultid'] = newresult
# print(result)
return HttpResponse(json.dumps(response_data),content_type='application/json')
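# addL1timeseries duplicates rows by nulling the primary key before save(),
# which forces Django to INSERT a fresh row instead of updating the old one.
# The generic idiom, as an illustrative sketch:
def _clone_row_sketch(obj):
    """Duplicate a Django model instance as a new database row."""
    obj.pk = None  # forget the identity so save() performs an INSERT
    obj.save()
    return obj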
#another approach
#https://rlskoeser.github.io/2016/03/31/migrating-data-between-databases-with-django/
def createODM2SQLiteFile(request):
entered_end_date = ''
entered_start_date = ''
myresultSeriesExport = []
if 'exportdata' in request.POST and 'myresultSeriesExport[]' in request.POST:
selectedMResultSeries = request.POST.getlist('myresultSeriesExport[]')
myresultSeriesExport = None
    useDates = request.POST.get('useDates') == 'true'
if useDates:
if 'endDate' in request.POST:
# print(entered_end_date)
entered_end_date = request.POST['endDate']
if 'startDate' in request.POST:
entered_start_date = request.POST['startDate']
#Employees.objects.values_list('eng_name', flat=True)
myresultSeriesExport = Timeseriesresultvalues.objects.all() \
.filter(valuedatetime__gte=entered_start_date) \
.filter(valuedatetime__lte=entered_end_date) \
.filter(resultid__in=selectedMResultSeries).order_by('-valuedatetime')
else:
myresultSeriesExport = Timeseriesresultvalues.objects.all() \
.filter(resultid__in=selectedMResultSeries).order_by('-valuedatetime')
# emailspreadsheet2(request, myresultSeriesExport, False)
#management.call_command('dump_object', 'odm2admin.Timeseriesresults', 17160, 17162, kitchensink=True)
sysout = sys.stdout
loc = settings.FIXTURE_DIR
# print(myresultSeriesExport.first())
random_string = get_random_string(length=5)
tmpfixture1 = 'tmp' + '.json' #+ random_string
sys.stdout = open(loc+ tmpfixture1, 'w')
tmploc1 = loc+ tmpfixture1
management.call_command('dump_object', 'odm2admin.Timeseriesresultvalues', myresultSeriesExport.first().valueid, kitchensink=True)
sys.stdout.close()
#jsonfile = open(loc+ 'tmp2.json', 'w')
# i=0
values = myresultSeriesExport.values_list('valueid', flat=True)
random_string = get_random_string(length=5)
# add random string back later
tmpfixture2 = 'tmp' + '.json' # + random_string
sys.stdout = open(loc + tmpfixture2, 'w')
# sys.stdout = open(loc + 'tmp2.json, 'w')
    tmploc2 = loc + tmpfixture2
sys.stdout.write(serializers.serialize("json", myresultSeriesExport[1:], indent=4,use_natural_foreign_keys=False,use_natural_primary_keys=False))
sys.stdout.close()
sys.stdout = sysout
#settings.MAP_CONFIG['result_value_processing_levels_to_display']
#db_name = exportdb.DATABASES['export']['NAME']
#print(db_name)
# print(tmploc1)
database = ''
if 'exportdata' in request.POST:
# print(entered_end_date)
exportdata = request.POST['exportdata']
if exportdata == 'true':
database = 'export'
if 'publishdata' in request.POST:
# print(entered_end_date)
publishdata = request.POST['publishdata']
if publishdata == 'true':
database = 'published'
#management.call_command('loaddata',
# tmploc1 ,database=database) # ,database='export'
# print('finished first file')
#management.call_command('loaddata',
# tmploc2,database=database)
#export_data.send(sender= Timeseriesresultvalues,tmploc1=tmploc1,tmploc2=tmploc2)
#management.call_command('create_sqlite_export',tmploc1,tmploc2, settings=exportdb)
# call('../')
# print(tmploc1)
# print(tmploc2)
dbfilepath = exportdb.DATABASES['default']['NAME']
path = os.path.dirname(dbfilepath)
dbfile = os.path.basename(dbfilepath)
dbfilename = os.path.splitext(dbfile)[0]
random_string = get_random_string(length=5)
dbfile2 = path +"/" + dbfilename + random_string + ".db"
#command = ['python', '/home/azureadmin/webapps/ODM2-AdminLCZO/manageexport.py', 'create_sqlite_export', tmploc1, tmploc2]
command = 'cp ' + dbfilepath + ' ' + str(dbfile2)
# print(command)
response = subprocess.check_call(command,shell=True)
#write an extra settings file instead - have it contain just DATABASES; remove databases from exportdb.py and import new file. 2
exportdb.DATABASES['default']['NAME'] = dbfile2
command = settings.BASE_DIR + '/scripts/create_sqlite_file.sh '+ dbfile2 + ' %>> ' + settings.BASE_DIR +'/logging/sqlite_export.log'
# print(command)
response = subprocess.check_call(command,shell=True) #
# print("response")
# print(response)
# print(exportdb.DATABASES['default']['NAME'])
return myresultSeriesExport
# outfile = loc +'tmp2.json'
# print(outfile)
# with open(outfile, 'w') as jsonfile:
# json.dump(data, jsonfile)
#outfile = loc +'tmp2.json'
#print(outfile)
#with open(outfile, 'w') as jsonfile:
# json.dump(data, jsonfile)
@login_required()
def export_to_hydroshare(request):
valuestoexport = createODM2SQLiteFile(request)
export_complete = True
resource_link = ''
user = request.user
# print(request.POST['hydroshareusername'])
if 'hydroshareusername' in request.POST and 'hydrosharepassword' in request.POST:
hs_client_id = settings.SOCIAL_AUTH_HYDROSHARE_UP_KEY
hs_client_secret = settings.SOCIAL_AUTH_HYDROSHARE_UP_SECRET
username = request.POST['hydroshareusername']
password = request.POST['hydrosharepassword']
auth = HydroShareAuthOAuth2(hs_client_id, hs_client_secret,
username=username, password=password)
else:
hs_client_id = settings.SOCIAL_AUTH_HYDROSHARE_KEY
hs_client_secret = settings.SOCIAL_AUTH_HYDROSHARE_SECRET
social = user.social_auth.get(provider='hydroshare')
token = social.extra_data['access_token']
        # avoid logging OAuth tokens; re-enable only for local debugging
        # print(social.extra_data)
        # print(token)
auth = HydroShareAuthOAuth2(hs_client_id, hs_client_secret,
token=social.extra_data)
#hs = get_oauth_hs(request)
#userInfo = hs.getUserInfo()
#
# token = None
#if 'code' in request.POST:
# print(request.POST['code'])
# token = request.POST['code']
#print('expires in ' + str(token['expires_in']))
#auth = HydroShareAuthOAuth2(client_id, client_secret,
# username='', password='')
hs = HydroShare(auth=auth)
username = hs.getUserInfo()
# print(username)
abstracttext = 'ODM2 Admin Result Series ' + str(valuestoexport.first().resultid)
    # default to empty strings so the metadata string below never concatenates None
    entered_start_date = ''
    entered_end_date = ''
if 'startDate' in request.POST:
entered_start_date = request.POST['startDate']
abstracttext += ' data values from: ' + entered_start_date
if 'endDate' in request.POST:
# # print(entered_end_date)
entered_end_date = request.POST['endDate']
abstracttext += ' ending on: ' + entered_end_date
#
abstract = abstracttext
title = 'ODM2 Admin Result Series ' + str(valuestoexport.first().resultid)
keywords = ['ODM2']
rtype = 'GenericResource'
fpath = exportdb.DATABASES['default']['NAME']
# # print(fpath)
# #metadata = '[{"coverage":{"type":"period", "value":{"start":"'+entered_start_date +'", "end":"'+ entered_end_date +'"}}}, {"creator":{"name":"Miguel Leon"}}]'
metadata = str('[{"coverage":{"type":"period", "value":{"start":"' + entered_start_date + '", "end":"' + entered_end_date + '"}}}, ' \
'{"creator":{"name":"' +user.get_full_name() +'"}}]')
extra_metadata = str('{"key-1": "value-1", "key-2": "value-2"}')
#
# #abstract = 'My abstract'
# #title = 'My resource'
# #keywords = ('my keyword 1', 'my keyword 2')
# #rtype = 'GenericResource'
# #fpath = 'C:/Users/leonmi/Google Drive/ODM2AdminLT2/ODM2SQliteBlank.db'
# #metadata = '[{"coverage":{"type":"period", "value":{"start":"01/01/2000", "end":"12/12/2010"}}}, {"creator":{"name":"John Smith"}}, {"creator":{"name":"Lisa Miller"}}]'
# #extra_metadata = '{"key-1": "value-1", "key-2": "value-2"}'
resource_id = hs.createResource(rtype, title, resource_file=fpath, keywords=keywords, abstract=abstract,
metadata=metadata, extra_metadata=extra_metadata)
# print(resource_id)
# for resource in hs.getResourceList():
# print(resource)
    return HttpResponse(json.dumps({'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
                                    'export_complete': export_complete,
                                    'username': username,
                                    'resource_link': resource_link}),
                        content_type='application/json')
@login_required()
def email_data_from_graph(request):
# print('email data')
emailsent = False
outEmail = ''
entered_end_date = ''
entered_start_date = ''
myresultSeriesExport = []
    if 'email_data' in request.POST and ('resultidu[]' in request.POST or 'myresultSeriesExport[]' in request.POST):
# print(' email data and resultid[]')
selectedMResultSeries = request.POST.getlist('myresultSeriesExport[]')
# print(selectedMResultSeries)
# resultids = request.POST.getlist('resultidu[]')
# try:
# print(resultidu)
# resultidu = [int(selectedMResultSeries)]
# except TypeError:
# resultids = re.findall(r'\d+',request.POST.getlist('myresultSeriesExport[]')) # re.findall(r'\d+',request.POST['myresultSeriesExport[]'])
resultidu = []
# mergeResults = 'true'
for results in selectedMResultSeries:
ids = re.findall(r'\d+', results)
for id in ids:
resultidu.append(int(id))
# for results in resultids:
# ids = re.findall(r'\d+', results)
# for id in ids:
# resultidu.append(int(reidsults))
selectedMResultSeries = resultidu
myresultSeriesExport = None
        useDates = request.POST.get('useDates') == 'true'
if useDates:
if 'endDate' in request.POST:
# print(entered_end_date)
entered_end_date = request.POST['endDate']
if 'startDate' in request.POST:
entered_start_date = request.POST['startDate']
myresultSeriesExport = Timeseriesresultvaluesextwannotations.objects.all() \
.filter(valuedatetime__gte=entered_start_date) \
.filter(valuedatetime__lte=entered_end_date) \
.filter(resultid__in=selectedMResultSeries).order_by('-valuedatetime')
else:
myresultSeriesExport = Timeseriesresultvaluesextwannotations.objects.all() \
.filter(resultid__in=selectedMResultSeries).order_by('-valuedatetime')
# print('email spreadsheet')
emailspreadsheet2(request, myresultSeriesExport, False) # for command str_selectedresultid_ids
# .after_response
emailsent=True
    return HttpResponse(json.dumps({'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
                                    'emailsent': emailsent,
                                    'outEmail': outEmail}),
                        content_type='application/json')
def hysterisisMetrics(discharge, response):
    """Placeholder for metrics describing hysteresis between paired discharge
    and response (e.g. concentration) series; currently returns an empty dict."""
    hystdict = {}
    return hystdict
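# One plausible metric for hysterisisMetrics above, offered only as a hedged
# sketch (neither the metric choice nor the helper name comes from this
# project): the signed shoelace area of the discharge/response loop, whose
# sign distinguishes clockwise from counter-clockwise hysteresis.
def _loop_area_sketch(discharge_vals, response_vals):
    """Signed area of the (discharge, response) loop via the shoelace formula."""
    n = min(len(discharge_vals), len(response_vals))
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += discharge_vals[i] * response_vals[j] - discharge_vals[j] * response_vals[i]
    return area / 2.0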
# https://bsou.io/posts/color-gradients-with-python
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
    ''' returns a gradient list of (n) colors between
        two hex colors. start_hex and finish_hex
        should be the full six-digit color string,
        including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
    # Initialize a list of the output colors with the starting color
    RGB_list = [s]
    # Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
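# Example: the hysteresis chart below asks for one color per point, e.g.
#     colors = linear_gradient('#BF001B', '#00E5C4', n=valcount)
#     hexcolors = colors['hex']   # ['#bf001b', ..., '#00e5c4']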
def TimeSeriesGraphingShort(request, feature_action='NotSet', samplingfeature='NotSet',
dataset='NotSet',dischargeresult='NotSet',
resultidu='NotSet', startdate='NotSet', enddate='NotSet',
popup='NotSet'): # ,startdate='',enddate=''
mergeResults='false'
authenticated = True
hystdict = None
if not request.user.is_authenticated:
# return HttpResponseRedirect('../')
authenticated = False
if popup == 'NotSet':
template = loader.get_template('chart2.html')
elif popup == 'smll':
template = loader.get_template('chartsmall.html')
elif popup == 'Anno':
if not authenticated:
return HttpResponseRedirect(settings.CUSTOM_TEMPLATE_PATH)
template = loader.get_template('chartAnnotation.html')
elif popup == 'hyst':
if not authenticated:
return HttpResponseRedirect(settings.CUSTOM_TEMPLATE_PATH)
template = loader.get_template('hysteresisChart.html')
else:
template = loader.get_template('chartpopup.html')
data_disclaimer = settings.DATA_DISCLAIMER
map_config = settings.MAP_CONFIG
useDataset = False
useSamplingFeature = False
# samplingfeature = None
# if 'annotation' in request.POST:
# pass
# raise ValidationError(request.POST['annotation'])
if dataset == 'NotSet':
if samplingfeature == 'NotSet':
feature_action = int(feature_action)
else:
samplingfeature = int(samplingfeature)
useSamplingFeature = True
else:
useDataset = True
dataset = int(dataset)
if resultidu != 'NotSet':
try:
# print(resultidu)
resultidu = [int(resultidu)]
        except (TypeError, ValueError):
resultids = re.findall(r'\d+',resultidu)
resultidu = []
mergeResults = 'true'
for results in resultids:
resultidu.append(int(results))
selected_results = []
name_of_sampling_features = []
# name_of_variables = []
name_of_units = []
myresultSeries = []
data = {}
featureActionLocation = None
featureActionMethod = None
datasetTitle = None
datasetAbstract = None
methods = None
resultListGrouped = None
# print(settings.MAP_CONFIG['result_value_processing_levels_to_display'])
if not useDataset:
if useSamplingFeature:
samplefeature = Samplingfeatures.objects.filter(samplingfeatureid=samplingfeature).get()
feature_actions = Featureactions.objects.filter(samplingfeatureid=samplefeature)
resultList = Results.objects.filter(featureactionid__in=feature_actions).filter(
processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display']
).order_by("featureactionid","resultid")
resultListGrouped = groupResultsByVariable(samplefeature)
actions = Actions.objects.filter(actionid__in=feature_actions.values("action"))
methods = Methods.objects.filter(methodid__in=actions.values("method"))
featureActionLocation = samplefeature.samplingfeaturename
else:
resultList = Results.objects.filter(featureactionid=feature_action).filter(
processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display']
).order_by("featureactionid","resultid")
featureAction = Featureactions.objects.filter(featureactionid=feature_action).get()
featureActionLocation = featureAction.samplingfeatureid.samplingfeaturename
resultListGrouped = groupResultsByVariable(featureAction.samplingfeatureid)
featureActionMethod = featureAction.action.method.methodname
action = Actions.objects.filter(actionid=featureAction.action.actionid).get()
methods = Methods.objects.filter(methodid=action.method.methodid)
else:
datasetResults = Datasetsresults.objects.filter(datasetid=dataset)
resultList = Results.objects.filter(resultid__in=datasetResults.values("resultid")).filter(
processing_level__in=settings.MAP_CONFIG['result_value_processing_levels_to_display']
).order_by("featureactionid","resultid")
datasetTitle = Datasets.objects.filter(datasetid=dataset).get().datasettitle
datasetAbstract = Datasets.objects.filter(datasetid=dataset).get().datasetabstract
numresults = resultList.count()
selectedMResultSeries = []
mergedResultSets = []
for i in range(0, numresults):
selectionStr = str('selection' + str(i))
# when annotating you can only select a single time series
# with a radio button
if mergeResults == 'true':
selectionStr = str('Mergedselection' + str(i))
if popup == 'Anno':
selectionStr = str('selection')
if selectionStr in request.POST:
# raise ValidationError(request.POST[selectionStr])
# print(request.POST[selectionStr])
if mergeResults =='true':
                mergedresults = re.findall(r'\d+', request.POST[selectionStr])
mergedResultSets.append(mergedresults)
for mergedresult in mergedresults:
for result in resultList:
if int(mergedresult) == result.resultid:
selectedMResultSeries.append(int(mergedresult))
else:
for result in resultList:
if int(request.POST[selectionStr]) == result.resultid:
selectedMResultSeries.append(int(request.POST[selectionStr]))
# if we are annotating we only have a single selection to find
if popup == 'Anno':
break
# selectedMResultSeries = Results.objects.filter(featureactionid=feature_action)
i = 0
    if len(selectedMResultSeries) == 0:
if resultidu == 'NotSet':
try:
selectedMResultSeries.append(resultList[0].resultid)
except IndexError:
html = "<html><body>No Data Available Yet.</body></html>"
return HttpResponse(html)
else:
try:
for resultid in resultidu:
selectedMResultSeries.append(resultid)
except ObjectDoesNotExist:
html = "<html><body>No Data Available Yet.</body></html>"
return HttpResponse(html)
if 'endDate' in request.POST:
entered_end_date = request.POST['endDate']
elif not enddate == 'NotSet':
entered_end_date = enddate
else:
entered_end_date = \
Timeseriesresultvalues.objects.filter(resultid__in=selectedMResultSeries).annotate(
Max('valuedatetime')).order_by(
'-valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
if 'startDate' in request.POST:
entered_start_date = request.POST['startDate']
elif not startdate == 'NotSet':
entered_start_date = startdate
else:
# entered_start_date= Measurementresultvalues.objects.
# filter(resultid__in=selectedMResultSeries).annotate(Min('valuedatetime')).\
# order_by('valuedatetime')[0].valuedatetime.strftime('%Y-%m-%d %H:%M')
# .annotate(Min('price')).order_by('price')[0]
datetime_entered_end_date = datetime.strptime(entered_end_date, '%Y-%m-%d %H:%M')
if popup == 'smll':
entered_start_date = datetime_entered_end_date - timedelta(
settings.SENSOR_DASHBOARD['time_series_days'])
elif popup =='Anno' or popup =='hyst':
entered_start_date = datetime_entered_end_date - timedelta(
settings.SENSOR_DASHBOARD['time_series_days'])
else:
entered_start_date = datetime_entered_end_date - timedelta(
map_config['time_series_months'] * 365 / 12) # .strftime('%Y-%m-%d %H:%M')
entered_start_date = entered_start_date.strftime('%Y-%m-%d %H:%M')
if mergeResults == 'false':
for selectedMResult in selectedMResultSeries:
i += 1
selected_result = Results.objects.filter(resultid=selectedMResult).get()
selected_results.append(selected_result)
# name_of_sampling_features.append(get_name_of_sampling_feature(selected_result))
tmpname = get_name_of_sampling_feature(selected_result)
name_of_sampling_features.append(tmpname)
myresultSeries.append(Timeseriesresultvalues.objects.all()
.filter(~Q(datavalue__lte=selected_result.variableid.nodatavalue))
.filter(valuedatetime__gte=entered_start_date)
.filter(valuedatetime__lte=entered_end_date)
.filter(resultid=selectedMResult).order_by('-valuedatetime'))
data.update({'datavalue' + str(i): []})
else:
if len(mergedResultSets) > 0:
# print(mergedResultSets)
for mergedResultSet in mergedResultSets:
i += 1
selected_result = Results.objects.filter(resultid__in=mergedResultSet).first()
# print('result set')
# print(mergedResultSet)
# print(selected_result)
selected_results.append(selected_result)
# name_of_sampling_features.append(get_name_of_sampling_feature(selected_result))
tmpname = get_name_of_sampling_feature(selected_result)
name_of_sampling_features.append(tmpname)
myresultSeries.append(Timeseriesresultvalues.objects.all()
.filter(~Q(datavalue__lte=selected_result.variableid.nodatavalue))
.filter(valuedatetime__gte=entered_start_date)
.filter(valuedatetime__lte=entered_end_date)
.filter(resultid__in=mergedResultSet).order_by('-valuedatetime'))
data.update({'datavalue' + str(i): []})
else:
i=1
selected_result = Results.objects.filter(resultid__in=selectedMResultSeries).first()
selected_results.append(selected_result)
# name_of_sampling_features.append(get_name_of_sampling_feature(selected_result))
tmpname = get_name_of_sampling_feature(selected_result)
name_of_sampling_features.append(tmpname)
myresultSeries.append(Timeseriesresultvalues.objects.all()
.filter(~Q(datavalue__lte=selected_result.variableid.nodatavalue))
.filter(valuedatetime__gte=entered_start_date)
.filter(valuedatetime__lte=entered_end_date)
.filter(resultid__in=selectedMResultSeries).order_by('-valuedatetime'))
data.update({'datavalue' + str(i): []})
i = 0
annotationsexist = False
# print(selectedMResultSeries)
if popup == 'Anno':
tsrvas = Timeseriesresultvalueannotations.objects.filter(
valueid__resultid__in=selectedMResultSeries).filter(
valueid__valuedatetime__gt=entered_start_date).filter(
valueid__valuedatetime__lt=entered_end_date)
if tsrvas.count() > 0:
# print('time series result value annotation count ' + str(tsrvas.count()))
# (tsrvas.query)
annotationsexist = True
#print('series')
#print(myresultSeries)
for myresults in myresultSeries:
i += 1
resultannotationsexist = False
print('response count ' + str(myresults.count()))
# print('1st result')
# print(myresults[0])
if popup == 'hyst':
fa = Featureactions.objects.filter(featureactionid=selected_results[0].featureactionid.featureactionid).get()
sf = Samplingfeatures.objects.filter(samplingfeatureid=fa.samplingfeatureid.samplingfeatureid).get()
fas = Featureactions.objects.filter(samplingfeatureid=sf)
units = Units.objects.filter(unit_type='Volumetric flow rate')
dischargeRs = None
if not dischargeresult == 'NotSet':
dischargeRs = Results.objects.filter(resultid=int(dischargeresult))
else:
dischargeRs = Results.objects.filter(featureactionid__in=fas).filter(unitsid__in=units)
dischargeR = dischargeRs.first()
dischargeTSR = Timeseriesresults.objects.filter(resultid=dischargeR).get()
# print(dischargeTSR)
tsrvdischarge = Timeseriesresultvalues.objects.filter(~Q(datavalue__lte=dischargeR.variableid.nodatavalue))\
.filter(valuedatetime__gte=entered_start_date)\
.filter(valuedatetime__lte=entered_end_date)\
.filter(resultid=dischargeTSR).order_by('-valuedatetime')
# print(tsrvdischarge.query)
# print(tsrvdischarge.count())
hystdict = hysterisisMetrics(tsrvdischarge,myresults)
if not popup=='hyst':
for result in myresults:
start = datetime(1970, 1, 1)
delta = result.valuedatetime - start
mills = delta.total_seconds() * 1000
if math.isnan(result.datavalue):
dataval = 'null'
else:
dataval = result.datavalue
# print(data.keys())
if popup == 'Anno':
data['datavalue' + str(i)].append(
{'x': mills, 'y': dataval, 'id': str(result.valueid)})
else:
data['datavalue' + str(i)].append(
[mills,dataval])
if popup == 'Anno':
for tsrva in tsrvas:
if tsrva.valueid == result:
# print('tsrv annotation value id ' + str(tsrva.valueid))
if not resultannotationsexist:
# print('resultannotationsexist')
resultannotationsexist = True
data.update({'datavalueannotated' : []})
data['datavalueannotated'].append(
{'x':mills,'y':dataval,'id':str(result.valueid)})
else:
valcount = len(myresults)
colors = linear_gradient('#BF001B','#00E5C4',n=valcount)# ['#00E5C4','#00E17D','#00DD38','#09D900','#49D500','#86D200','#C2CE00','#CA9900','#C65A00','#C21E00',]
hexcolors = colors['hex']
print(valcount)
k=0
for result, discharge in zip(myresults,tsrvdischarge):
if math.isnan(result.datavalue):
dataval = 'null'
else:
dataval = result.datavalue
if math.isnan(discharge.datavalue):
dischargeval = 'null'
else:
dischargeval = discharge.datavalue # + " " + str(discharge.valuedatetime)
start = datetime(1970, 1, 1)
delta = discharge.valuedatetime - start
mills = delta.total_seconds() * 1000
data['datavalue' + str(i)].append(
{'x':dischargeval,'y':dataval,'dateTime':mills,'color':hexcolors[k]}) # [dischargeval,dataval]
#if threshold == result.valueid:
k+=1
timeseriesresults = Timeseriesresults.objects.\
filter(resultid__in=resultList.values("resultid")).\
order_by("resultid__variableid", "aggregationstatisticcv")
# build strings for graph labels
# print('data')
# print(data)
i = 0
seriesStr = ''
unit = ''
location = ''
variable = ''
aggStatistic = ''
series = []
# print('selected result series')
# print(selectedMResultSeries)
r = Results.objects.filter(resultid__in=selectedMResultSeries)\
.order_by("featureactionid","resultid") # .order_by("unitsid")
tsrs = Timeseriesresults.objects.filter(resultid__in=selectedMResultSeries)\
.order_by("resultid__resultid__featureactionid","resultid")
L1exists = False
for selectedMResult in r:
i += 1
tsr = tsrs.get(resultid=selectedMResult)
aggStatistic = tsr.aggregationstatisticcv
unit = selectedMResult.unitsid.unitsabbreviation
variable = selectedMResult.variableid.variable_name
location = selectedMResult.featureactionid.samplingfeatureid.samplingfeaturename
if i == 1 and not unit == '':
seriesStr += str(unit)
name_of_units.append(str(unit))
elif not unit == '':
seriesStr += ' - ' + str(unit)
name_of_units.append(str(unit))
# print('series unit and var')
# print(str(unit) + ' - ' + str(variable))
# print(len(mergedResultSets))
if not popup=='hyst':
series.append({"name": str(unit) + ' - ' + str(variable) + ' - ' +
str(aggStatistic) + ' - ' + str(location), "allowPointSelect": "true", "yAxis": str(unit),
"data": data['datavalue' + str(i)], "point": { }})
else: # build color zones
vals = len(data['datavalue' + str(i)])
ii=0
j=10
thresholds = []
# print(vals)
for datum in data['datavalue' + str(i)]:
ii+=1
# print(ii)
# print(int(round(vals/j)))
if ii== int(round(vals/j)):
j-=1
thresholds.append(datum['y'])
zones = []
# print(thresholds)
for ii in range(1, len(thresholds)):
threshold = thresholds.pop()
if not threshold == 'null':
                    zone = {'value': float(threshold), 'className': 'zone-' + str(ii)}
                    zones.append(zone)
# print('zones')
# print(zones)
            series.append({"name": str(unit) + ' - ' + str(variable) + ' - ' +
                str(aggStatistic) + ' - ' + str(location), "allowPointSelect": "true", "yAxis": str(unit),
                "lineWidth": 2, "data": data['datavalue' + str(i)], "zones": zones})
# "plotOptions": {"maker": {"enabled": true},
if mergeResults =='true' and len(mergedResultSets) <= i:
break
if popup == 'Anno':
relatedtsr = Timeseriesresults.objects.select_related('resultid').filter(
resultid__featureactionid = selectedMResult.featureactionid).filter(
resultid__variableid = selectedMResult.variableid
).filter(resultid__unitsid = selectedMResult.unitsid).filter(intendedtimespacing = tsr.intendedtimespacing
).filter(intendedtimespacingunitsid = tsr.intendedtimespacingunitsid)
relatedresults = Results.objects.filter(resultid__in=relatedtsr)
print(relatedresults)
#relatedresults = Results.objects.filter(
# featureactionid = selectedMResult.featureactionid).filter(
# variableid = selectedMResult.variableid
#).filter(unitsid = selectedMResult.unitsid)
for rr in relatedresults:
if rr.processing_level.processinglevelid ==2:
L1exists = True
if annotationsexist:
series.append({"name": 'Annotated ' + str(unit) + ' - ' + str(variable) + ' - ' +
str(aggStatistic) + ' - ' + str(location), "allowPointSelect": "true", "yAxis": str(unit),
"data": data['datavalueannotated'], "point": { }})
# "point": { "events": {'click': 'selectPointsByClick'}}
i = 0
titleStr = ''
# print(series)
i = 0
name_of_sampling_features = set(name_of_sampling_features)
for name_of_sampling_feature in name_of_sampling_features:
i += 1
if i == 1:
titleStr += name_of_sampling_feature # + ', ' +name_of_variable
else:
titleStr += ' - ' + name_of_sampling_feature # +name_of_variable+ ', '
chartID = 'chart_id'
chart = {"renderTo": chartID, "type": 'scatter', "zoomType": 'xy'}
title2 = {"text": titleStr}
graphType = 'scatter'
if not popup=='hyst':
xAxis = {"type": 'datetime', "title": {"text": 'Date'}}
    else:
        xAxis = {"title": {"text": 'Discharge'}}  # "dateTimeLabelFormats": {}
yAxis = {"title": {"text": seriesStr}}
int_selectedresultid_ids = []
str_selectedresultid_ids = []
for int_selectedresultid in selectedMResultSeries:
int_selectedresultid_ids.append(int(int_selectedresultid))
str_selectedresultid_ids.append(str(int_selectedresultid))
csvexport = False
cvqualitycode = None
if popup == 'Anno':
cvqualitycode = CvQualitycode.objects.all().order_by('definition')
# csvexport = True
# k=0
# myfile = StringIO.StringIO()
# for myresults in myresultSeriesExport:
# for result in myresults:
# if k==0:
# myfile.write(result.csvheader())
# myfile.write('\n')
# myfile.write(result.csvoutput())
# myfile.write('\n')
# k+=1
# response = HttpResponse(myfile.getvalue(),content_type='text/csv')
# response['Content-Disposition'] = 'attachment; filename="mydata.csv"'
# if csvexport:
# return response
# else:
# raise ValidationError(relatedFeatureList)
for result in resultList:
tsr = Timeseriesresults.objects.filter(resultid=result).get()
result.timeintervalunits = tsr.intendedtimespacingunitsid
result.timeinterval = tsr.intendedtimespacing
responsedict = {'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
'startDate': entered_start_date,
'endDate': entered_end_date,
'popup': popup,
'mergeResults':mergeResults,
'resultListGrouped':resultListGrouped,
# 'emailsent': emailsent,
# 'outEmail': outEmail,
'useSamplingFeature': useSamplingFeature,
'featureActionMethod': featureActionMethod,
'featureActionLocation': featureActionLocation,
'cvqualitycode': cvqualitycode,
'data_disclaimer': data_disclaimer,
'datasetTitle': datasetTitle,
'datasetAbstract': datasetAbstract,
'useDataset': useDataset,
'startdate': startdate,
'enddate': enddate,
'L1exists': L1exists,
'SelectedResults': int_selectedresultid_ids,
'authenticated': authenticated,
'methods': methods,
'timeseriesresults': timeseriesresults,
'chartID': chartID, 'chart': chart,
'series': series,
'title2': title2, 'resultList': resultList,
'graphType': graphType, 'xAxis': xAxis,
'yAxis': yAxis,
'name_of_units': name_of_units}
if hystdict:
z = hystdict.copy()
z.update(responsedict)
responsedict = z
return TemplateResponse(request, template, responsedict, )
#
# From http://stackoverflow.com/questions/8200342/removing-duplicate-strings-from-a-list-in-python
def removeDupsFromListOfStrings(listOfStrings):
seen = set()
result = []
for item in listOfStrings:
if item not in seen:
seen.add(item)
result.append(item)
return result
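# Example (order-preserving, unlike list(set(...))):
#     removeDupsFromListOfStrings(['a', 'b', 'a'])  ->  ['a', 'b']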
def scatter_plot(request):
authenticated = True
if not request.user.is_authenticated:
authenticated = False
xVariableSelection = yVariableSelection = fieldarea1 = fieldarea2 = filteredFeatures = None
xVar = None
yVar = None
title = None
if 'fieldarea1' in request.POST and 'fieldarea2' not in request.POST:
if not request.POST['fieldarea1'] == 'All':
fieldarea1 = request.POST['fieldarea1']
fieldarea1RF = Relatedfeatures.objects.filter(relatedfeatureid=fieldarea1)
filteredFeatures = Samplingfeatures.objects.filter(
samplingfeatureid__in=fieldarea1RF.values("samplingfeatureid"))
fieldarea1 = Samplingfeatures.objects.filter(samplingfeatureid=fieldarea1).get()
if 'fieldarea1' in request.POST and 'fieldarea2' in request.POST:
if not request.POST['fieldarea1'] == 'All' and not request.POST['fieldarea2'] == 'All':
fieldarea1 = request.POST['fieldarea1']
fieldarea2 = request.POST['fieldarea2']
fieldareaRF1 = Relatedfeatures.objects.filter(relatedfeatureid=fieldarea1)
fieldareaRF2 = Relatedfeatures.objects.filter(relatedfeatureid=fieldarea2)
# fieldareaRF = fieldarea1RF & fieldarea2RF #only sampling features in 1 and 2
filteredFeatures = Samplingfeatures.objects.filter(
samplingfeatureid__in=fieldareaRF1.values("samplingfeatureid")) \
.filter(samplingfeatureid__in=fieldareaRF2.values("samplingfeatureid"))
fieldarea1 = Samplingfeatures.objects.filter(samplingfeatureid=fieldarea1).get()
fieldarea2 = Samplingfeatures.objects.filter(samplingfeatureid=fieldarea2).get()
title = str(fieldarea1.samplingfeaturecode) + " - " + str(
fieldarea2.samplingfeaturecode) + " : "
    if 'xVariableSelection' in request.POST and 'yVariableSelection' in request.POST:
xVariableSelection = request.POST['xVariableSelection']
yVariableSelection = request.POST['yVariableSelection']
        xVar = Variables.objects.filter(variableid=xVariableSelection).get()
        yVar = Variables.objects.filter(variableid=yVariableSelection).get()
        xVariableSelection = xVar
        yVariableSelection = yVar
if title:
title = title + str(xVar.variablecode) + " - " + str(yVar.variablecode)
else:
title = str(xVar.variablecode) + " - " + str(yVar.variablecode)
prv = Profileresults.objects.all()
# second filter = exclude summary results attached to field areas
pr = Results.objects.filter(resultid__in=prv).filter(
~Q(
featureactionid__samplingfeatureid__sampling_feature_type="Ecological land "
"classification")).filter(
~Q(featureactionid__samplingfeatureid__sampling_feature_type="Field area"))
# variables is the list to pass to the html template
variables = Variables.objects.filter(variableid__in=pr.values("variableid"))
fieldareas = Samplingfeatures.objects.filter(
sampling_feature_type="Ecological land classification") # Field area
xlocation = []
ylocation = []
xdata = []
ydata = []
prvx = prvy = xlocs = ylocs = None
if xVar and yVar:
rvx = pr.filter(variableid=xVar).values('resultid')
prvx = Profileresultvalues.objects.filter(~Q(datavalue=-6999)) \
.filter(~Q(datavalue=-888.88)).filter(resultid__in=rvx).order_by(
"resultid__resultid__unitsid",
"resultid__resultid__featureactionid__samplingfeatureid",
"zlocation")
rvy = pr.filter(variableid=yVar).values('resultid')
prvy = Profileresultvalues.objects.filter(~Q(datavalue=-6999)) \
.filter(~Q(datavalue=-888.88)).filter(resultid__in=rvy).order_by(
"resultid__resultid__unitsid",
"resultid__resultid__featureactionid__samplingfeatureid",
"zlocation")
xr = Results.objects.filter(resultid__in=prvx.values("resultid"))
xfa = Featureactions.objects.filter(featureactionid__in=xr.values("featureactionid"))
if filteredFeatures:
xlocs = Samplingfeatures.objects.filter(
samplingfeatureid__in=xfa.values("samplingfeatureid")).filter(
samplingfeatureid__in=filteredFeatures)
else:
xlocs = Samplingfeatures.objects.filter(
samplingfeatureid__in=xfa.values("samplingfeatureid"))
# xlocation = re.sub('[^A-Za-z0-9]+', '', xlocation)
yr = Results.objects.filter(resultid__in=prvy.values("resultid"))
yfa = Featureactions.objects.filter(featureactionid__in=yr.values("featureactionid"))
if filteredFeatures:
ylocs = Samplingfeatures.objects.filter(
samplingfeatureid__in=yfa.values("samplingfeatureid")).filter(
samplingfeatureid__in=filteredFeatures)
else:
ylocs = Samplingfeatures.objects.filter(
samplingfeatureid__in=yfa.values("samplingfeatureid"))
    if prvx and prvy:
prvx = prvx.filter(resultid__resultid__featureactionid__samplingfeatureid__in=xlocs)
prvy = prvy.filter(resultid__resultid__featureactionid__samplingfeatureid__in=ylocs)
for x in prvx:
xdata.append(
str(
x.datavalue
) + ";" + str(
x.resultid.resultid.unitsid.unitsabbreviation
) + ";" + str(
x.zlocation
) + ";" + str(
x.resultid.resultid.featureactionid.samplingfeatureid.samplingfeaturename
)
)
tmpLoc = "{0} {1}-{2} {3};{4};{5};{6};{7}".format(str(
x.resultid.resultid.featureactionid.samplingfeatureid.samplingfeaturename
), str(
x.zlocation - x.zaggregationinterval
), str(
x.zlocation
), str(
x.zlocationunitsid.unitsabbreviation
), str(
x.resultid.resultid.unitsid.unitsabbreviation
), str(
x.zlocation
), str(
x.resultid.resultid.featureactionid.samplingfeatureid.samplingfeaturename
), str(
x.resultid.resultid.unitsid.unitsabbreviation
))
xlocation.append(tmpLoc)
for y in prvy:
ydata.append(
str(y.datavalue) + ";" + str(
y.resultid.resultid.unitsid.unitsabbreviation) + ";" + str(y.zlocation) +
";" + str(
y.resultid.resultid.featureactionid.samplingfeatureid.samplingfeaturename))
            foundloc = False
            for x in prvx:
                if x.zlocation == y.zlocation or x.resultid.resultid.featureactionid \
                        .samplingfeatureid.samplingfeaturename == y.resultid.resultid \
                        .featureactionid.samplingfeatureid.samplingfeaturename:
                    foundloc = True
                    break
            # Build the y-series label unconditionally; the original assigned it
            # only inside the match branch, leaving tmpLoc stale or unbound in
            # exactly the case where it was appended below.
            tmpLoc = "{0} {1}-{2} {3};{4};{5};{6};{7}".format(
                str(y.resultid.resultid
                    .featureactionid.samplingfeatureid.samplingfeaturename),
                str(y.zlocation - y.zaggregationinterval), str(y.zlocation),
                str(y.zlocationunitsid.unitsabbreviation),
                str(y.resultid.resultid.unitsid.unitsabbreviation), str(y.zlocation),
                str(y.resultid.resultid.featureactionid
                    .samplingfeatureid.samplingfeaturename),
                str(y.resultid.resultid.unitsid.unitsabbreviation)
            )
            if not foundloc:
                xlocation.append(tmpLoc)
# xlocation.append(tmpLoc)
chartID = 'chart_id'
chart = {"renderTo": chartID, "type": 'scatter', "zoomType": 'xy'}
title2 = {"text": title}
# xAxis = {"categories":xAxisCategories,} #"type": 'category',
# "title": {"text": xAxisCategories},
yAxis = {"title": {"text": str(yVar)}}
xAxis = {"title": {"text": str(xVar)}}
graphType = 'scatter'
if 'export_data' in request.POST:
resultValuesSeries = prvx | prvy
response = exportspreadsheet(request, resultValuesSeries)
return response
return TemplateResponse(request, 'soilsscatterplot.html',
{'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
'data_disclaimer': settings.DATA_DISCLAIMER,
'xVariables': variables, 'yVariables': variables,
'authenticated': authenticated,
'xVariableSelection': xVariableSelection,
'yVariableSelection': yVariableSelection,
'fieldarea1': fieldarea1, 'fieldarea2': fieldarea2,
'fieldareas': fieldareas,
'chartID': chartID, 'chart': chart, 'title2': title2,
'graphType': graphType,
'yAxis': yAxis, 'xAxis': xAxis, 'xdata': xdata, 'ydata': ydata,
'ylocation': ylocation,
'xlocation': xlocation, 'name': request.user,
'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': 'Soils Scatter Plot'}, )
def exportcitations(request, citations, csv):
myfile = StringIO()
first = True
citationpropvalues = Citationextensionpropertyvalues.objects.filter(
citationid__in=citations).order_by("propertyid")
authorheader = Authorlists.objects.filter(citationid__in=citations).order_by(
"authororder").distinct("authororder")
# MyTable.objects.extra(select={'int_name': 'CAST(t.name AS INTEGER)'},
# order_by=['int_name'])
    authheadercount = len(authorheader)
citationpropheaders = citationpropvalues.distinct("propertyid").order_by("propertyid")
for citation in citations:
if first and csv:
myfile.write(citation.csvheader())
for auth in authorheader:
myfile.write(auth.csvheader())
for citationprop in citationpropheaders:
myfile.write(citationprop.csvheader())
myfile.write('\n')
if csv:
myfile.write(citation.csvoutput())
else: # endnote instead
myfile.write(citation.endnoteexport())
# export authors
authors = Authorlists.objects.filter(citationid=citation).order_by("authororder")
        authcount = len(authors)
for auth in authors:
if csv:
myfile.write(auth.csvoutput())
else:
myfile.write(auth.endnoteexport())
if csv:
for i in range(0, authheadercount - authcount, 1):
myfile.write('"",')
thiscitationpropvalues = citationpropvalues.filter(citationid=citation).order_by(
"propertyid")
for matchheader in citationpropheaders:
headermatched = False
            for citationprop in thiscitationpropvalues:
                if matchheader.propertyid == citationprop.propertyid:
                    headermatched = True
if csv and headermatched:
myfile.write(citationprop.csvoutput())
elif headermatched:
myfile.write(citationprop.endnoteexport())
if headermatched:
break
if not headermatched and csv:
myfile.write('"",')
if csv:
myfile.write('\n')
else:
myfile.write('ER - \r\n\r\n')
first = False
if csv:
response = HttpResponse(myfile.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="mycitations.csv"'
else:
response = HttpResponse(myfile.getvalue(), content_type='text/txt')
response['Content-Disposition'] = 'attachment; filename="myCitationsEndNoteImport.txt"'
return response
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
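# Note: X-Forwarded-For is client-supplied unless a trusted proxy rewrites it,
# so the leftmost address read above is only reliable behind such a proxy.
# Example header:  X-Forwarded-For: 203.0.113.7, 10.0.0.2  ->  '203.0.113.7'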
def grecaptcha_verify(request):
if request.method == 'POST':
response = {}
data = request.POST
captcha_rs = data.get('g_recaptcha_response')
url = "https://www.google.com/recaptcha/api/siteverify"
params = {
'secret': settings.RECAPTCHA_PRIVATE_KEY,
'response': captcha_rs,
'remoteip': get_client_ip(request)
}
verify_rs = requests.get(url, params=params, verify=True)
verify_rs = verify_rs.json()
response["status"] = verify_rs.get("success", False)
response['message'] = verify_rs.get('error-codes', None) or "Unspecified error."
return response
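# grecaptcha_verify returns a plain dict for POST requests (and implicitly
# None otherwise); the callers below branch on response["status"]. A hedged
# sketch of a caller (the view name is illustrative):
#
#     def my_protected_view(request):
#         check = grecaptcha_verify(request)
#         if not check["status"]:
#             return HttpResponse(json.dumps({'error': check['message']}),
#                                 content_type='application/json', status=400)
#         ...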
def emailspreadsheet(request, resultValuesSeries, profileResult=True):
# if the user hit the export csv button export the measurement results to csv
response = grecaptcha_verify(request)
captach_status = response["status"]
out_email = ''
entered_start_date =''
entered_end_date =''
use_dates =''
emailsent = False
    if captcha_status:
if 'startDate' in request.POST:
entered_start_date = request.POST['startDate']
if 'endDate' in request.POST:
entered_end_date = request.POST['endDate']
if 'useDates' in request.POST:
use_dates = request.POST['useDates']
if 'outEmail' in request.POST:
outgoingemail = request.POST['outEmail']
management.call_command('export_timeseriesresultvaluesextwannotations', outgoingemail,
entered_start_date, entered_end_date, use_dates,
resultValuesSeries)
emailsent = True
return emailsent
# @after_response.enable
def emailspreadsheet2(request, resultValuesSeries, profileResult=True):
response = grecaptcha_verify(request)
captach_status = response["status"]
# captach_status = True
outgoingemail = ''
entered_start_date =''
entered_end_date =''
use_dates =''
emailsent = False
emailtitle = 'your ODM2 Admin data is attached'
emailtext = 'Attached are results for the following time series: '
    if captcha_status:
# print("captach ok")
if 'outEmail' in request.POST:
outgoingemail = request.POST['outEmail']
# print(outgoingemail)
tolist = []
tolist.append(str(outgoingemail))
# print(tolist)
myfile = StringIO()
# raise ValidationError(resultValues)
k = 0
variablesAndUnits = []
variable = ''
unit = ''
firstheader = True
processingCode = None
lastResult = None
newResult = None
resultValuesHeaders = resultValuesSeries.filter(
~Q(
samplingfeaturetypecv="Ecological land classification" # noqa
)
).filter(
~Q(
samplingfeaturetypecv="Field area" # noqa
)
).order_by(
"resultid", "variablecode", "unitsabbreviation",
"processinglevelcode"
)
# .distinct("resultid__resultid__variableid","resultid__resultid__unitsid")
for myresults in resultValuesHeaders:
lastResult = newResult
newResult = myresults
lastVariable = variable
variable = myresults.variablecode
lastUnit = unit
unit = myresults.unitsabbreviation
lastProcessingCode = processingCode
processingCode = myresults.processinglevelcode
# if not firstheader and firstVar==variable and firstUnit==unit:
# only add the first instance of each variable, once one repeats your done.
# break
if not lastVariable == variable or not lastUnit == unit or not lastProcessingCode == \
processingCode or not newResult.resultid == lastResult.resultid:
variablesAndUnits.append(variable + unit + processingCode +str(newResult.resultid))
if firstheader:
myfile.write(myresults.csvheader())
firstheader = False
myfile.write(myresults.csvheaderShort())
emailtext = emailtext + ' - ' + str(myresults.email_text())
# elif not lastUnit==unit:
# myfile.write(myresults.csvheaderShortUnitOnly())
if profileResult:
resultValuesSeries = resultValuesSeries.filter(
~Q(
resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Ecological land classification" # noqa
)
).filter(
~Q(resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Field area" # noqa
)
).order_by(
"resultid__resultid__featureactionid__samplingfeatureid__samplingfeaturecode",
"resultid__intendedzspacing", "resultid__resultid__variableid",
"resultid__resultid__unitsid__unitsabbreviation"
)
else:
resultValuesSeries = resultValuesSeries.filter(
~Q(samplingfeaturetypecv="Ecological land classification") # noqa
).filter(
~Q(samplingfeaturetypecv="Field area" # noqa
)
).order_by(
"valuedatetime",
"resultid",
"samplingfeaturename",
"variablecode", "unitsabbreviation",
"processinglevelcode"
)
# myfile.write(lastResult.csvheaderShort())
# emailtext = emailtext + ' - ' + str(lastResult.email_text())
myfile.write('\n')
samplingfeaturename = ''
lastsamplingfeaturename = ''
depth = 0
position = 0
time = None
# resultid__resultid__featureactionid__samplingfeatureid__samplingfeaturename
for myresults in resultValuesSeries:
lastResult = newResult
# #newResult = myresults
variable = myresults.variablecode
unit = myresults.unitsabbreviation
lastsamplingfeaturename = samplingfeaturename
samplingfeaturename = myresults.samplingfeaturename
lastDepth = depth
processingCode = myresults.processinglevelcode
if profileResult:
depth = myresults.resultid.intendedzspacing
if not k == 0 and (not lastsamplingfeaturename == samplingfeaturename or
not depth == lastDepth):
myfile.write('\n')
temp = myresults.csvoutput()
myfile.write(temp)
position = 0
elif k == 0:
temp = myresults.csvoutput()
myfile.write(temp)
else:
lastTime = time
time = myresults.valuedatetime
if not k == 0 and (not lastsamplingfeaturename == samplingfeaturename or
not time == lastTime):
myfile.write('\n')
temp = myresults.csvoutput()
myfile.write(temp)
position = 0
elif k == 0:
temp = myresults.csvoutput()
myfile.write(temp)
# else:
# if variablesAndUnits.index(unicode(variable)+unicode(unit)) ==position:
for i in range(
position,
variablesAndUnits.index(variable +
unit +
processingCode+str(myresults.resultid))
):
myfile.write(",")
myfile.write(",")
myfile.write(",")
position += 1
myfile.write(myresults.csvoutputShort())
position += 1
k += 1
# response = StreamingHttpResponse(myfile.getvalue(), content_type='text/csv')
# response['Content-Disposition'] = 'attachment; filename="mydata.csv"'
# print('email!!!!')
# print(settings.EMAIL_FROM_ADDRESS)
# print(emailtext)
# print(tolist)
email = EmailMessage(emailtitle,emailtext,
settings.EMAIL_FROM_ADDRESS, tolist)
email.attach('mydata.csv', myfile.getvalue(),'text/csv')
email.send()
return True
else:
# print("captcha not ok")
return False
def exportspreadsheet(request, resultValuesSeries, profileResult=True):
# if the user hit the export csv button export the measurement results to csv
    myfile = StringIO()
# raise ValidationError(resultValues)
k = 0
variablesAndUnits = []
variable = ''
unit = ''
firstheader = True
processingCode = None
resultValuesHeaders = resultValuesSeries.filter(
~Q(
resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Ecological land classification" # noqa
)
).filter(
~Q(
resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Field area" # noqa
)
).order_by(
"resultid__resultid__variableid", "resultid__resultid__unitsid",
"resultid__resultid__processing_level__processinglevelcode"
)
# .distinct("resultid__resultid__variableid","resultid__resultid__unitsid")
for myresults in resultValuesHeaders:
lastVariable = variable
variable = myresults.resultid.resultid.variableid.variablecode
lastUnit = unit
unit = myresults.resultid.resultid.unitsid.unitsabbreviation
lastProcessingCode = processingCode
processingCode = myresults.resultid.resultid.processing_level.processinglevelcode
# if not firstheader and firstVar==variable and firstUnit==unit:
        # only add the first instance of each variable; once one repeats, you're done.
# break
if not lastVariable == variable or not lastUnit == unit or not lastProcessingCode == \
processingCode:
variablesAndUnits.append(variable + unit + processingCode)
if firstheader:
myfile.write(myresults.csvheader())
firstheader = False
myfile.write(myresults.csvheaderShort())
# elif not lastUnit==unit:
# myfile.write(myresults.csvheaderShortUnitOnly())
if profileResult:
resultValuesSeries = resultValuesSeries.filter(
~Q(
resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Ecological land classification" # noqa
)
).filter(
~Q(resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Field area" # noqa
)
).order_by(
"resultid__resultid__featureactionid__samplingfeatureid__samplingfeaturecode",
"resultid__intendedzspacing", "resultid__resultid__variableid",
"resultid__resultid__unitsid"
)
else:
resultValuesSeries = resultValuesSeries.filter(
~Q(
resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Ecological land classification") # noqa
).filter(
~Q(
resultid__resultid__featureactionid__samplingfeatureid__sampling_feature_type="Field area" # noqa
)
).order_by(
"valuedatetime",
"resultid__resultid__featureactionid__samplingfeatureid__samplingfeaturecode",
"resultid__resultid__variableid", "resultid__resultid__unitsid",
"resultid__resultid__processing_level__processinglevelcode"
)
# myfile.write(lastResult.csvheaderShort())
myfile.write('\n')
samplingFeatureCode = ''
depth = 0
position = 0
time = None
# resultid__resultid__featureactionid__samplingfeatureid__samplingfeaturecode
for myresults in resultValuesSeries:
variable = myresults.resultid.resultid.variableid.variablecode
unit = myresults.resultid.resultid.unitsid.unitsabbreviation
lastSamplingFeatureCode = samplingFeatureCode
samplingFeatureCode = myresults.resultid.resultid.featureactionid.samplingfeatureid \
.samplingfeaturecode
lastDepth = depth
processingCode = myresults.resultid.resultid.processing_level.processinglevelcode
if profileResult:
depth = myresults.resultid.intendedzspacing
if not k == 0 and (not lastSamplingFeatureCode == samplingFeatureCode or
not depth == lastDepth):
myfile.write('\n')
temp = myresults.csvoutput()
myfile.write(temp)
position = 0
elif k == 0:
temp = myresults.csvoutput()
myfile.write(temp)
else:
lastTime = time
time = myresults.valuedatetime
if not k == 0 and (not lastSamplingFeatureCode == samplingFeatureCode or
not time == lastTime):
myfile.write('\n')
temp = myresults.csvoutput()
myfile.write(temp)
position = 0
elif k == 0:
temp = myresults.csvoutput()
myfile.write(temp)
# else:
# if variablesAndUnits.index(unicode(variable)+unicode(unit)) ==position:
for i in range(
position,
variablesAndUnits.index(variable +
unit +
processingCode)
):
myfile.write(",")
position += 1
myfile.write(myresults.csvoutputShort())
position += 1
k += 1
response = StreamingHttpResponse(myfile.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="mydata.csv"'
# email = EmailMessage(emailtitle,emailtext,
# 'leonmi@sas.upenn.edu', tolist)
# email.attach('mydata.csv', myfile.getvalue(),'text/csv')
# email.send()
return response
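# Editor's note: a self-contained sketch (hypothetical names) of the column-padding
# technique both export functions above rely on: every variable/unit/processing-level
# combination owns a fixed CSV column, and a row is aligned by writing empty cells
# (bare commas) until the write position reaches that column's index.
def _pad_to_column_sketch(outfile, columns, key, value, position):
    target = columns.index(key)
    while position < target:
        outfile.write(",")  # empty cell for a column this row has no value in
        position += 1
    outfile.write(value)
    return position + 1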
def graph_data(request, selectedrelatedfeature='NotSet', samplingfeature='NotSet', popup='NotSet'):
authenticated = True
if not request.user.is_authenticated:
authenticated = False
if popup == 'NotSet':
template = loader.get_template('chartVariableAndFeature.html')
else:
template = loader.get_template('profileresultgraphpopup.html')
selected_relatedfeatid = 15
data_disclaimer = settings.DATA_DISCLAIMER
# relatedfeatureList
# update_result_on_related_feature
# need a variables list instead of a results list
# find the variables for the selected related feature
if 'SelectedRelatedFeature' in request.POST:
if not request.POST['SelectedRelatedFeature'] == 'All':
# relatedFeature = Samplingfeatures.objects.filter(samplingfeatureid=
# selected_relatedfeatid) #Relatedfeatures.objects.filter(relatedfeatureid=
# int(selected_relatedfeatid)).distinct('relatedfeatureid')
selected_relatedfeatid = int(request.POST['SelectedRelatedFeature'])
# relatedFeature = Samplingfeatures.objects.filter(samplingfeatureid=
# selected_relatedfeatid)
elif selectedrelatedfeature != 'NotSet':
selected_relatedfeatid = int(selectedrelatedfeature)
else:
selected_relatedfeatid = 15
useSamplingFeature = False
samplingfeaturelabel = None
if samplingfeature != 'NotSet':
samplingfeature = int(samplingfeature)
useSamplingFeature = True
samplingfeaturelabel = Samplingfeatures.objects.filter(samplingfeatureid=samplingfeature).get()
# find variables found at the sampling feature
# need to go through featureaction to get to results
# need the feature actions for all of the sampling features related to this sampling feature
if not useSamplingFeature:
sampling_features = Relatedfeatures.objects.filter(
relatedfeatureid__exact=selected_relatedfeatid).values(
'samplingfeatureid')
samplingfeaturelabel = Samplingfeatures.objects.filter(samplingfeatureid=selected_relatedfeatid).get()
# select the feature actions for all of the related features.
feature_actions = Featureactions.objects.filter(samplingfeatureid__in=sampling_features)
else:
feature_actions = Featureactions.objects.filter(samplingfeatureid=samplingfeature)
featureresults = Results.objects.filter(
featureactionid__in=feature_actions
).order_by(
"variableid", "unitsid"
).filter(
~Q(featureactionid__samplingfeatureid__sampling_feature_type="Ecological land "
"classification")
).filter(
~Q(featureactionid__samplingfeatureid__sampling_feature_type="Field area"))
variableList = Variables.objects.filter(variableid__in=featureresults.values("variableid"))
# find the profile results series for the selected variable
numvariables = variableList.__len__()
# raise ValidationError(numvariables)
selectedMVariableSeries = []
for i in range(0, numvariables):
selectionStr = str('selection' + str(i))
if selectionStr in request.POST:
# raise ValidationError(request.POST[selectionStr])
for variable in variableList:
if int(request.POST[selectionStr]) == variable.variableid:
selectedMVariableSeries.append(int(request.POST[selectionStr]))
# if no series were selected (like on first load) set the series to some value.
if len(variableList) > 0 and len(selectedMVariableSeries) == 0:
selectedMVariableSeries.append(int(variableList[0].variableid))
elif len(variableList) == 0 and len(selectedMVariableSeries) == 0:
selectedMVariableSeries.append(15)
selectedMResultsSeries = None
for variable in selectedMVariableSeries:
if not selectedMResultsSeries:
selectedMResultsSeries = featureresults.filter(variableid=variable)
        else:  # concatenate the result sets for each variable
selectedMResultsSeries = selectedMResultsSeries | featureresults.filter(
variableid=variable)
selected_results = []
name_of_sampling_features = []
name_of_variables = []
name_of_units = []
unitAndVariable = ''
i = 0
data = {}
resultValuesSeries = None
# if 'update_result_on_related_feature' in request.POST:
# raise ValidationError(selectedMResultsSeries)
# selectedMResultsSeries.order_by("resultid__")
    # these lines sort the results by their z-spacing, low to high, then
    # alphabetically by their sampling feature code; luckily Ridge, Slope and
    # Valley are already in alphabetical order.
profileresults = Profileresults.objects.filter(resultid__in=selectedMResultsSeries).order_by(
"resultid__variableid",
"resultid__unitsid",
"intendedzspacing",
"resultid__featureactionid__samplingfeatureid__samplingfeaturecode")
sortedResults = list()
for result in profileresults:
sortedResults.append(selectedMResultsSeries.get(resultid=result.resultid.resultid))
selectedMResultsSeries = sortedResults
for selectedMResult in selectedMResultsSeries:
i += 1
selected_result = Results.objects.filter(resultid=selectedMResult.resultid).get()
# if 'update_result_on_related_feature' in request.POST:
# raise ValidationError(selected_result)
selected_results.append(selected_result)
# name_of_sampling_features.append(get_name_of_sampling_feature(selected_result))
tmpname = selected_result.featureactionid.samplingfeatureid.samplingfeaturename # get_name_of_sampling_feature(selected_result)
tmpLocName = tmpname
tmpname = selected_result.variableid.variablecode # get_name_of_variable(selected_result)
unitAndVariable = tmpname
        name_of_variables.append(tmpname)
tmpname = selected_result.unitsid.unitsname # get_name_of_units(selected_result)
# if(selectedMResult.resultid==2072):
# raise ValidationError(tmpname)
unitAndVariable = unitAndVariable + " " + tmpname
        name_of_units.append(tmpname)
resultValues = Profileresultvalues.objects.all().filter(
resultid__exact=selectedMResult.resultid) # .order_by("-zlocation")
if not resultValuesSeries:
resultValuesSeries = resultValues
else:
resultValuesSeries = resultValuesSeries | resultValues
# if 'update_result_on_related_feature' in request.POST:
# raise ValidationError(resultValues)
        for resultValue in resultValues:
            # raise ValidationError(resultValues)
            seriesName = 'datavalue' + unitAndVariable
            tmpLocName = tmpLocName + " Depth " + str(
                resultValue.zlocation - resultValue.zaggregationinterval) + "-" + str(
                resultValue.zlocation) + " " + str(resultValue.zlocationunitsid.unitsabbreviation)
            name_of_sampling_features.append(tmpLocName)
            # -6999 and -888.88 are sentinel "no data" values; store None so the
            # chart renders a gap instead of a bogus measurement.
            data.setdefault(seriesName, [])
            if resultValue.datavalue != -6999 and resultValue.datavalue != -888.88:
                data[seriesName].append([tmpLocName, resultValue.datavalue])
            else:
                data[seriesName].append([tmpLocName, None])
# data['datavalue' + unitAndVariable].append( resultValue.datavalue)
# #get_name_of_variable(selected_result) + " " +
# get_name_of_sampling_feature(selected_result) ,
# data2.append(resultValue.datavalue)
# raise ValidationError(data)
# build strings for graph labels
i = 0
seriesStr = ''
series = []
titleStr = ''
tmpUnit = ''
tmpVariableName = ''
numberofLocations = len(name_of_sampling_features)
for name_of_unit, name_of_variable in zip(name_of_units, name_of_variables):
# raise ValidationError("length of unit names"+ str(len(name_of_units)) +
# "length of name of variables"+ str(len(name_of_variables)))
# #get fewer sampling feature names
i += 1
lastUnit = tmpUnit
lastVariableName = tmpVariableName
tmpVariableName = name_of_variable
tmpUnit = name_of_unit
if not name_of_variable == lastVariableName or not name_of_unit == lastUnit:
update = True
else:
update = False
if i == 1 and not name_of_unit == '':
seriesStr += name_of_unit
elif name_of_unit != lastUnit and update:
# tmpUnit = name_of_unit
seriesStr += ' - ' + name_of_unit
lastUnitAndVariable = unitAndVariable
unitAndVariable = tmpVariableName + " " + tmpUnit
# raise ValidationError(data['datavalue'+unitAndVariable])
# raise ValidationError(name_of_unit)
key = 'datavalue' + unitAndVariable
if lastUnitAndVariable != unitAndVariable and update and key in data:
series.append({"name": tmpUnit + ' - ' + tmpVariableName, "yAxis": tmpUnit,
#"area": {"cropThreshold": 50000},
"data": data[
'datavalue' + unitAndVariable]})
            # removed from the series name: +' - '+ tmpLocName
if titleStr == '':
titleStr = tmpVariableName
else:
titleStr += ' - ' + tmpVariableName
elif i == numberofLocations and len(series) == 0 and key in data:
# raise ValidationError(name_of_unit)
series.append({"name": tmpUnit + ' - ' + tmpVariableName, "yAxis": tmpUnit,
"data": data['datavalue' + unitAndVariable]})
if titleStr == '':
titleStr = tmpVariableName
# titleStr += tmpVariableName
# series.append(data['datavalue'+str(i)])
chartID = 'chart_id'
chart = {"renderTo": chartID, "type": 'column', "zoomType": 'xy'}
title2 = {"text": titleStr}
# xAxis = {"categories":xAxisCategories,} #"type":
# 'category',"title": {"text": xAxisCategories},
yAxis = {"title": {"text": seriesStr}}
graphType = 'column'
withProfileResults = Profileresults.objects.all()
results = Results.objects.filter(resultid__in=withProfileResults)
samplefeatid = Featureactions.objects.filter(featureactionid__in=results).values(
'samplingfeatureid')
relatedFeatureList = Relatedfeatures.objects.filter(
samplingfeatureid__in=samplefeatid).distinct(
'relatedfeatureid') # .order_by('relatedfeatureid')
# relatedFeatureList = sorted(relatedFeatureList,
# key=operator.attrgetter('relatedfeatureid__samplingfeaturecode'))
# #relatedFeatureList.order_by('relatedfeatureid__samplingfeaturecode')
int_selectedvariable_ids = []
for int_selectedvariableid in selectedMVariableSeries:
int_selectedvariable_ids.append(int(int_selectedvariableid))
# if the user hit the export csv button export the measurement results to csv
linkExtProperty = Extensionproperties.objects.filter(propertyname="DatasetFileLink").get()
datasetresults = Datasetsresults.objects.filter(resultid__in=selectedMResultsSeries)
datasets = Datasets.objects.filter(datasetid__in=datasetresults.values("datasetid"))
datasetcitations = Datasetcitations.objects.filter(datasetid__in=datasets)
citations = Citations.objects.filter(citationid__in=datasetcitations.values("citationid"))
datasetcitationlinks = {}
for citation in citations:
extprop = Citationextensionpropertyvalues.objects.filter(citationid=citation).get(propertyid=linkExtProperty)
try:
datasetcitationlinks[citation.title] = extprop.propertyvalue
except ObjectDoesNotExist:
datasetcitationlinks[citation.title] = ''
if 'export_data' in request.POST:
resultValuesSeries = resultValuesSeries.order_by(
"resultid__resultid__featureactionid__samplingfeatureid__samplingfeaturecode",
"resultid__intendedzspacing", "resultid__resultid__variableid",
"resultid__resultid__unitsid")
response = exportspreadsheet(request, resultValuesSeries)
return response
else:
# this removes duplicates from a list of strings
name_of_units = removeDupsFromListOfStrings(name_of_units)
# raise ValidationError(relatedFeatureList)
return TemplateResponse(request, template,
{'prefixpath': settings.CUSTOM_TEMPLATE_PATH,
'datasetcitationlinks': datasetcitationlinks,
'variableList': variableList,
'SelectedVariables': int_selectedvariable_ids,
'authenticated': authenticated, 'data_disclaimer': data_disclaimer,
'chartID': chartID, 'chart': chart, 'series': series,
'title2': title2, 'graphType': graphType, 'yAxis': yAxis,
'name_of_units': name_of_units,
'samplingfeaturelabel': samplingfeaturelabel,
'relatedFeatureList': relatedFeatureList,
'SelectedRelatedFeature': selected_relatedfeatid,
'name': request.user, 'site_title': admin.site.site_title,
'site_header': admin.site.site_header,
'short_title': 'Soils Data'
}, ) | mit |
w-garcia/BugClustering | classifier.py | 1 | 6864 | from generate_vectors import generate_vectors
from clustering_h_agglomerative import do_h_agglomerative
from clustering_sklearn import do_sklearn
from config import config as cfg
import csv
import DBModel
import util
import copy
import random
def classify(slice=None):
_dataset_stack, selection_cache = setup_datasets(slice)
list_of_dicts = []
while _dataset_stack:
print "[classifier] : {} tickets left to go.".format(len(_dataset_stack))
row = _dataset_stack.pop()
row_copy = copy.deepcopy(row)
row_copy.classification = ''
        # NB: assert on a tuple is always truthy; the condition and message must be
        # passed as a bare assert statement for the check to actually run.
        assert uniqueness_condition(selection_cache, row_copy), \
            "Classifier bug ticket is not unique! Check ticket dataset for duplicates."
original_len = len(selection_cache)
selection_cache.append(row_copy)
new_len = len(selection_cache)
print "[vectors] : Original dataset length: {}, new length: {}".format(original_len, new_len)
#TODO: change prediction variable such that I can pass in a list of addon rows and get a list of predictions
prediction = []
generate_vectors(cfg.model_selection, selection_cache)
if cfg.classification_method == 'default':
do_h_agglomerative(cfg.model_selection, prediction)
elif cfg.classification_method == 'knn':
do_sklearn(cfg.model_selection, prediction)
elif cfg.classification_method == 'kmeans':
do_sklearn(cfg.model_selection, prediction)
selection_cache.pop()
_row_dict = create_row_dict(prediction, row)
list_of_dicts.append(_row_dict)
cls_path = util.generate_meta_path(cfg.model_selection, 'classifier')
util.ensure_path_exists(cls_path)
filename = ''
if cfg.classification_method == 'default':
filename = cls_path + list_of_dicts[0]['system'] + '_classifier.csv'
elif cfg.classification_method == 'knn':
filename = cls_path + list_of_dicts[0]['system'] + '_knn_classifier.csv'
elif cfg.classification_method == 'kmeans':
filename = cls_path + list_of_dicts[0]['system'] + '_kmeans_classifier.csv'
write_classifier_file(filename, list_of_dicts)
print "[status] : Classifier finished. Analysis started."
def create_row_dict(prediction, row):
row_dict = {'id': row.id, 'description': row.description,
'system': row.system, 'ground truth': row.classification,
'prediction': ' '.join(prediction[0])}
return row_dict
def get_chunks(data_list):
num_chunks = int(1 / cfg.test_dataset_split)
len_chunk = len(data_list) / num_chunks
remainders = len(data_list) % num_chunks
# Chop off remainder tickets if length of data_list yields a remainder
if remainders:
data_list = data_list[:-remainders]
return [data_list[i:i + len_chunk] for i in range(0, len(data_list), len_chunk)]
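# Editor's note, illustrative only: with cfg.test_dataset_split = 0.25, get_chunks
# builds int(1 / 0.25) = 4 equal folds and silently drops any remainder, e.g. for
# 10 tickets the fold length is 2 and the last 2 tickets are discarded:
#     get_chunks(range(10)) -> [[0, 1], [2, 3], [4, 5], [6, 7]]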
def contains_classes_of_interest(row):
for ci in cfg.classes_of_interest:
for c in row.classification.split(' '):
if ci in c:
return True
return False
def setup_datasets(slice):
    # TODO: Get in random order; sequential tickets might be similar (important when processing multiple tickets at once)
# Create dataset stack to be labelled
if cfg.clustering_mode == 'test':
_dataset_stack = [row for row in DBModel.LFF_Keywords.get_db_ref_by_system(cfg.test_dataset).select()
if contains_classes_of_interest(row)]
if cfg.test_dataset == cfg.model_selection:
# Split up the same dataset according to split
chunks = get_chunks(_dataset_stack)
# Get index to use as test. This is done using the slice variable for k-fold, or randomly for random subsampling
if cfg.xvalidation_mode == 'kfold':
index = slice
elif cfg.xvalidation_mode == 'rand_ss':
index = random.randint(0, len(chunks) - 1)
else:
index = 0
_dataset_stack = chunks[index]
chunks.pop(index)
# Flatten rest of chunks into selection
selection_cache = [row for chunk in chunks for row in chunk]
return _dataset_stack, selection_cache
elif cfg.model_selection == 'all_systems':
# First add all the tickets, ignoring the nested system
selection_cache = []
for system in util.systems:
if system == cfg.test_dataset:
continue
for row in DBModel.LFF_Keywords.get_db_ref_by_system(system):
if contains_classes_of_interest(row):
selection_cache.append(row)
# Figure out what the test dataset will be
chunks = get_chunks(_dataset_stack)
# Get index to use as test. This is done using the slice variable for k-fold, or randomly for random subsampling
            if cfg.xvalidation_mode == 'kfold':
index = slice
            elif cfg.xvalidation_mode == 'rand_ss':
                index = random.randint(0, len(chunks) - 1)
            else:  # not sure what the default should be yet
index = 0
_dataset_stack = chunks[index]
chunks.pop(index)
# Now add the remaining tickets from the test dataset using flattened rest of chunks
for row in [row for chunk in chunks for row in chunk]:
selection_cache.append(row)
return _dataset_stack, selection_cache
else:
selection_cache = [row for row in DBModel.LFF_Keywords.get_db_ref_by_system(cfg.model_selection).select()]
return _dataset_stack, selection_cache
else:
_dataset_stack = [row for row in DBModel.LFF_Keywords.get_db_ref_by_system(cfg.labelling_dataset).select()]
# Label dataset should never intersect with model data
if cfg.model_selection == 'all_systems':
selection_cache = []
for system in util.systems:
for row in DBModel.LFF_Keywords.get_db_ref_by_system(system).select():
selection_cache.append(row)
else:
selection_cache = [row for row in DBModel.LFF_Keywords.get_db_ref_by_system(cfg.model_selection).select()]
return _dataset_stack, selection_cache
def write_classifier_file(filename, list_of_dicts):
with open(filename, 'w') as csvfile:
fieldnames = list_of_dicts[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in list_of_dicts:
writer.writerow(row)
def uniqueness_condition(selection, addon_selection):
original_ids = [row.id for row in selection]
add_id = addon_selection.id
if add_id in original_ids:
return False
return True
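# Editor's note: a minimal illustration (hypothetical Row class) of the uniqueness
# check above, which guards against the popped test ticket already being present in
# the training selection before it is temporarily appended for vector generation.
def _uniqueness_sketch():
    class Row(object):
        def __init__(self, id):
            self.id = id
    selection = [Row(1), Row(2)]
    assert uniqueness_condition(selection, Row(3))      # unseen id -> unique
    assert not uniqueness_condition(selection, Row(2))  # duplicate id -> rejected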
| apache-2.0 |
mcr/ietfdb | django/contrib/gis/utils/geoip.py | 316 | 14811 | """
This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R)
C API (http://www.maxmind.com/app/c). This is an alternative to the GPL
licensed Python GeoIP interface provided by MaxMind.
GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts.
For IP-based geolocation, this module requires the GeoLite Country and City
datasets, in binary format (CSV will not work!). The datasets may be
downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/.
Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory
corresponding to settings.GEOIP_PATH. See the GeoIP docstring and examples
below for more details.
TODO: Verify compatibility with Windows.
Example:
>>> from django.contrib.gis.utils import GeoIP
>>> g = GeoIP()
>>> g.country('google.com')
{'country_code': 'US', 'country_name': 'United States'}
>>> g.city('72.14.207.99')
{'area_code': 650,
'city': 'Mountain View',
'country_code': 'US',
'country_code3': 'USA',
'country_name': 'United States',
'dma_code': 807,
'latitude': 37.419200897216797,
'longitude': -122.05740356445312,
'postal_code': '94043',
'region': 'CA'}
>>> g.lat_lon('salon.com')
(37.789798736572266, -122.39420318603516)
>>> g.lon_lat('uh.edu')
(-95.415199279785156, 29.77549934387207)
>>> g.geos('24.124.1.80').wkt
'POINT (-95.2087020874023438 39.0392990112304688)'
"""
import os, re
from ctypes import c_char_p, c_float, c_int, Structure, CDLL, POINTER
from ctypes.util import find_library
from django.conf import settings
if not settings.configured: settings.configure()
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = dict((key, getattr(settings, key))
for key in ('GEOIP_PATH', 'GEOIP_LIBRARY_PATH', 'GEOIP_COUNTRY', 'GEOIP_CITY')
if hasattr(settings, key))
lib_path = GEOIP_SETTINGS.get('GEOIP_LIBRARY_PATH', None)
# GeoIP Exception class.
class GeoIPException(Exception): pass
# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
lib_name = None
else:
# TODO: Is this really the library name for Windows?
lib_name = 'GeoIP'
# Getting the path to the GeoIP library.
if lib_name: lib_path = find_library(lib_name)
if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). '
'Try setting GEOIP_LIBRARY_PATH in your settings.' % lib_name)
lgeoip = CDLL(lib_path)
# Regular expressions for recognizing IP addresses and the GeoIP
# free database editions.
ipregex = re.compile(r'^(?P<w>\d\d?\d?)\.(?P<x>\d\d?\d?)\.(?P<y>\d\d?\d?)\.(?P<z>\d\d?\d?)$')
free_regex = re.compile(r'^GEO-\d{3}FREE')
lite_regex = re.compile(r'^GEO-\d{3}LITE')
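# Editor's note, illustrative: ipregex distinguishes dotted-quad addresses from
# FQDNs (it matches '24.124.1.80' but not 'djangoproject.com'), while free_regex
# and lite_regex recognise database-info strings of the form 'GEO-###FREE ...'
# and 'GEO-###LITE ...' as returned by GeoIP_database_info.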
#### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
                # to a `ctypes.Union` to accommodate this in the future, when
                # pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
# TODO: The following structure fields were added in 1.4.3 --
# uncomment these fields when sure previous versions are no
# longer distributed by package maintainers.
#('charset', c_int),
#('continent_code', c_char_p),
]
class GeoIPTag(Structure): pass
#### ctypes function prototypes ####
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# For retrieving records by name or address.
def record_output(func):
func.restype = RECTYPE
return func
rec_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
rec_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
geoip_open = lgeoip.GeoIP_open
geoip_open.restype = DBTYPE
geoip_close = lgeoip.GeoIP_delete
geoip_close.argtypes = [DBTYPE]
geoip_close.restype = None
# String output routines.
def string_output(func):
func.restype = c_char_p
return func
geoip_dbinfo = string_output(lgeoip.GeoIP_database_info)
cntry_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
cntry_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
cntry_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
cntry_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
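# Editor's note: a generic, hypothetical sketch of the prototype pattern above.
# ctypes assumes foreign functions return int, so pointer- and string-returning
# functions must have `restype` set before the first call; record_output and
# string_output simply do this once per function handle. Not called anywhere.
def _restype_sketch():
    libc = CDLL(find_library('c'))
    getenv = libc.getenv       # returns char * in C
    getenv.restype = c_char_p  # without this, the pointer would come back as an int
    return getenv('PATH')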
#### GeoIP class ####
class GeoIP(object):
# The flags for GeoIP memory caching.
# GEOIP_STANDARD - read database from filesystem, uses least memory.
#
# GEOIP_MEMORY_CACHE - load database into memory, faster performance
# but uses more memory
#
# GEOIP_CHECK_CACHE - check for updated database. If database has been updated,
# reload filehandle and/or memory cache.
#
# GEOIP_INDEX_CACHE - just cache
# the most frequently accessed index portion of the database, resulting
# in faster lookups than GEOIP_STANDARD, but less memory usage than
# GEOIP_MEMORY_CACHE - useful for larger databases such as
# GeoIP Organization and GeoIP City. Note, for GeoIP Country, Region
# and Netspeed databases, GEOIP_INDEX_CACHE is equivalent to GEOIP_MEMORY_CACHE
#
GEOIP_STANDARD = 0
GEOIP_MEMORY_CACHE = 1
GEOIP_CHECK_CACHE = 2
GEOIP_INDEX_CACHE = 4
cache_options = dict((opt, None) for opt in (0, 1, 2, 4))
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initializes the GeoIP object, no parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP data sets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.dat) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH settings attribute.
* cache: The cache settings when opening up the GeoIP datasets,
and may be an integer in (0, 1, 2, 4) corresponding to
the GEOIP_STANDARD, GEOIP_MEMORY_CACHE, GEOIP_CHECK_CACHE,
and GEOIP_INDEX_CACHE `GeoIPOptions` C API settings,
respectively. Defaults to 0, meaning that the data is read
from the disk.
* country: The name of the GeoIP country data file. Defaults to
'GeoIP.dat'; overrides the GEOIP_COUNTRY settings attribute.
* city: The name of the GeoIP city data file. Defaults to
'GeoLiteCity.dat'; overrides the GEOIP_CITY settings attribute.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = self.cache_options[cache]
else:
raise GeoIPException('Invalid caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS.get('GEOIP_PATH', None)
if not path: raise GeoIPException('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, basestring):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try and open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS.get('GEOIP_COUNTRY', 'GeoIP.dat'))
if os.path.isfile(country_db):
self._country = geoip_open(country_db, cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS.get('GEOIP_CITY', 'GeoLiteCity.dat'))
if os.path.isfile(city_db):
self._city = geoip_open(city_db, cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure
# out whether the given database path is for the GeoIP country
# or city databases.
ptr = geoip_open(path, cache)
info = geoip_dbinfo(ptr)
if lite_regex.match(info):
# GeoLite City database detected.
self._city = ptr
self._city_file = path
elif free_regex.match(info):
# GeoIP Country database detected.
self._country = ptr
self._country_file = path
else:
raise GeoIPException('Unable to recognize database edition: %s' % info)
else:
raise GeoIPException('GeoIP path must be a valid file or directory.')
def __del__(self):
# Cleaning any GeoIP file handles lying around.
if self._country: geoip_close(self._country)
if self._city: geoip_close(self._city)
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, basestring):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIPException('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIPException('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIPException('Invalid GeoIP city data file: %s' % self._city_file)
def city(self, query):
"""
Returns a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
self._check_query(query, city=True)
if ipregex.match(query):
# If an IP address was passed in
ptr = rec_by_addr(self._city, c_char_p(query))
else:
# If a FQDN was passed in.
ptr = rec_by_name(self._city, c_char_p(query))
# Checking the pointer to the C structure, if valid pull out elements
        # into a dictionary and return.
if bool(ptr):
record = ptr.contents
return dict((tup[0], getattr(record, tup[0])) for tup in record._fields_)
else:
return None
def country_code(self, query):
"Returns the country code for the given IP Address or FQDN."
self._check_query(query, city_or_country=True)
if self._country:
if ipregex.match(query): return cntry_code_by_addr(self._country, query)
else: return cntry_code_by_name(self._country, query)
else:
return self.city(query)['country_code']
def country_name(self, query):
"Returns the country name for the given IP Address or FQDN."
self._check_query(query, city_or_country=True)
if self._country:
if ipregex.match(query): return cntry_name_by_addr(self._country, query)
else: return cntry_name_by_name(self._country, query)
else:
return self.city(query)['country_name']
def country(self, query):
"""
        Returns a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
return {'country_code' : self.country_code(query),
'country_name' : self.country_name(query),
}
#### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None: return None
else: return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Returns a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Returns a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Returns a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
#### GeoIP Database Information Routines ####
def country_info(self):
"Returns information about the GeoIP country database."
if self._country is None:
ci = 'No GeoIP Country data in "%s"' % self._country_file
else:
ci = geoip_dbinfo(self._country)
return ci
country_info = property(country_info)
def city_info(self):
"Retuns information about the GeoIP city database."
if self._city is None:
ci = 'No GeoIP City data in "%s"' % self._city_file
else:
ci = geoip_dbinfo(self._city)
return ci
city_info = property(city_info)
def info(self):
"Returns information about all GeoIP databases in use."
return 'Country:\n\t%s\nCity:\n\t%s' % (self.country_info, self.city_info)
info = property(info)
#### Methods for compatibility w/the GeoIP-Python API. ####
@classmethod
def open(cls, full_path, cache):
return GeoIP(full_path, cache)
def _rec_by_arg(self, arg):
if self._city:
return self.city(arg)
else:
return self.country(arg)
region_by_addr = city
region_by_name = city
record_by_addr = _rec_by_arg
record_by_name = _rec_by_arg
country_code_by_addr = country_code
country_code_by_name = country_code
country_name_by_addr = country_name
country_name_by_name = country_name
| bsd-3-clause |
heli522/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 246 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
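# Editor's note: -1 is the "unlabeled" marker consumed by LabelSpreading; only the
# first (outer-circle) and last (inner-circle) samples carry ground-truth labels.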
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
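# Editor's note (beyond the original example): after fitting, the per-sample class
# distributions are also available, e.g.
#     label_spread.label_distributions_  # array of shape (n_samples, n_classes)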
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
DonBeo/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 335 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy, so
# as to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
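# Editor's note, illustrative: with the defaults above, make_data returns a shuffled
# X of shape (n_samples_per_center * grid_size ** 2, 2) = (900, 2) together with
# integer labels y in [0, 8] identifying each sample's true grid cell.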
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |