repo_name | path | copies | size | content | license
---|---|---|---|---|---
rflamary/POT | examples/plot_otda_mapping.py | 2 | 4084 | # -*- coding: utf-8 -*-
"""
===========================================
OT mapping estimation for domain adaptation
===========================================
This example shows how to use MappingTransport to jointly estimate the
transport coupling and an approximate transport map, using either a linear
or a kernelized mapping, as introduced in [8].
[8] M. Perrot, N. Courty, R. Flamary, A. Habrard,
"Mapping estimation for discrete optimal transport",
Neural Information Processing Systems (NIPS), 2016.
"""
# Authors: Remi Flamary <remi.flamary@unice.fr>
# Stanislas Chambon <stan.chambon@gmail.com>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
##############################################################################
# Generate data
# -------------
n_source_samples = 100
n_target_samples = 100
theta = 2 * np.pi / 20
noise_level = 0.1
Xs, ys = ot.datasets.make_data_classif(
'gaussrot', n_source_samples, nz=noise_level)
Xs_new, _ = ot.datasets.make_data_classif(
'gaussrot', n_source_samples, nz=noise_level)
Xt, yt = ot.datasets.make_data_classif(
'gaussrot', n_target_samples, theta=theta, nz=noise_level)
# one of the target modes changes its variance (so no linear mapping can match it)
Xt[yt == 2] *= 3
Xt = Xt + 4
##############################################################################
# Plot data
# ---------
pl.figure(1, (10, 5))
pl.clf()
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# MappingTransport with linear kernel
ot_mapping_linear = ot.da.MappingTransport(
kernel="linear", mu=1e0, eta=1e-8, bias=True,
max_iter=20, verbose=True)
ot_mapping_linear.fit(Xs=Xs, Xt=Xt)
# for original source samples, transform applies barycentric mapping
transp_Xs_linear = ot_mapping_linear.transform(Xs=Xs)
# for out-of-sample source points, transform applies the learned linear mapping
transp_Xs_linear_new = ot_mapping_linear.transform(Xs=Xs_new)
# MappingTransport with gaussian kernel
ot_mapping_gaussian = ot.da.MappingTransport(
kernel="gaussian", eta=1e-5, mu=1e-1, bias=True, sigma=1,
max_iter=10, verbose=True)
ot_mapping_gaussian.fit(Xs=Xs, Xt=Xt)
# for original source samples, transform applies barycentric mapping
transp_Xs_gaussian = ot_mapping_gaussian.transform(Xs=Xs)
# for out-of-sample source points, transform applies the learned Gaussian mapping
transp_Xs_gaussian_new = ot_mapping_gaussian.transform(Xs=Xs_new)
##############################################################################
# Plot transported samples
# ------------------------
pl.figure(2)
pl.clf()
pl.subplot(2, 2, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_linear[:, 0], transp_Xs_linear[:, 1], c=ys, marker='+',
label='Mapped source samples')
pl.title("Bary. mapping (linear)")
pl.legend(loc=0)
pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_linear_new[:, 0], transp_Xs_linear_new[:, 1],
c=ys, marker='+', label='Learned mapping')
pl.title("Estim. mapping (linear)")
pl.subplot(2, 2, 3)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_gaussian[:, 0], transp_Xs_gaussian[:, 1], c=ys,
marker='+', label='barycentric mapping')
pl.title("Bary. mapping (kernel)")
pl.subplot(2, 2, 4)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_gaussian_new[:, 0], transp_Xs_gaussian_new[:, 1], c=ys,
marker='+', label='Learned mapping')
pl.title("Estim. mapping (kernel)")
pl.tight_layout()
pl.show()
| mit |
herilalaina/scikit-learn | sklearn/utils/tests/test_graph.py | 37 | 1042 | # Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.testing import ignore_warnings
@ignore_warnings(category=DeprecationWarning)
def test_graph_laplacian():
for mat in (np.arange(10) * np.arange(10)[:, np.newaxis],
np.ones((7, 7)),
np.eye(19),
np.vander(np.arange(4)) + np.vander(np.arange(4)).T,):
sp_mat = sparse.csr_matrix(mat)
for normed in (True, False):
laplacian = graph_laplacian(mat, normed=normed)
n_nodes = mat.shape[0]
if not normed:
np.testing.assert_array_almost_equal(laplacian.sum(axis=0),
np.zeros(n_nodes))
np.testing.assert_array_almost_equal(laplacian.T, laplacian)
np.testing.assert_array_almost_equal(
laplacian, graph_laplacian(sp_mat, normed=normed).toarray())
| bsd-3-clause |
schets/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 212 | 3359 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
h2oai/h2o | py/testdir_multi_jvm/test_parse_with_cancel.py | 8 | 3409 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_jobs
DELETE_KEYS = True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_with_cancel(self):
mustWait = 10
importFolderPath = 'standard'
timeoutSecs = 500
csvFilenameList = [
("standard", "covtype.data", 54),
("manyfiles-nflx-gz", "file_1.dat.gz", 378),
("standard", "covtype20x.data", 54),
("manyfiles-nflx-gz", "file_[100-109].dat.gz", 378),
]
# just loop on the same file. If remnants exist and are locked, we will blow up?
# Maybe try to do an inspect to see if either the source key or parse key exist and cause stack traces
for (importFolderPath, csvFilename, response) in csvFilenameList:
# creates csvFilename.hex from file in importFolder dir
csvPathname = importFolderPath + "/" + csvFilename
hex_key = csvFilename + ".hex"
(importResult, importPattern) = h2i.import_only(bucket='home-0xdiag-datasets', path=csvPathname, timeoutSecs=50)
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key,
timeoutSecs=500, noPoll=True, doSummary=False)
job_key = parseResult['job_key']
# give it a little time to start
time.sleep(3)
h2o.nodes[0].jobs_cancel(key=job_key)
# now wait until the job cancels, and we're idle
h2o_jobs.pollWaitJobs(timeoutSecs=30)
elapsed = time.time() - start
print "Cancelled parse completed in", elapsed, "seconds."
h2o.check_sandbox_for_errors()
# get a list of keys from storview. 20 is fine..shouldn't be many, since we putfile, not import folder
# there maybe a lot since we import the whole "standard" folder
# find the ones that pattern match the csvFilename, and inspect them. Might be none
storeViewResult = h2o_cmd.runStoreView(timeoutSecs=timeoutSecs, view=100)
keys = storeViewResult['keys']
for k in keys:
keyName = k['key']
print "kevin:", keyName
if csvFilename in keyName:
h2o_cmd.runInspect(key=keyName)
h2o.check_sandbox_for_errors()
# This will tell h2o to delete using the key name from the import file, whatever pattern matches to csvFilename
# we shouldn't have to do this..the import/parse should be able to overwrite without deleting.
# h2i.delete_keys_from_import_result(pattern=csvFilename, importResult=importResult)
# If you cancel a parse, you aren't allowed to reparse the same file or import a directory with that file,
# or cause the key name that the parse would have used, for 5 seconds after the cancel request gets a json
# response
print "Waiting", mustWait, "seconds before next reparse-cancel."
time.sleep(mustWait)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
xzturn/tensorflow | tensorflow/python/data/experimental/ops/enumerate_ops.py | 11 | 1965 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(None, "Use `tf.data.Dataset.enumerate()`.")
@tf_export("data.experimental.enumerate_dataset")
def enumerate_dataset(start=0):
"""A transformation that enumerates the elements of a dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.apply(tf.data.experimental.enumerate_dataset(start=5))
=> { (5, 1), (6, 2), (7, 3) }
b.apply(tf.data.experimental.enumerate_dataset())
=> { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.enumerate(start)
return _apply_fn
| apache-2.0 |
nhuntwalker/astroML | astroML/linear_model/kernel_regression.py | 4 | 1568 | import numpy as np
from .linear_regression import gaussian_basis
from sklearn.metrics import pairwise_kernels
class NadarayaWatson(object):
"""Nadaraya-Watson Kernel Regression
This is essentially a kernel-weighted (Gaussian by default) moving average of the training points.
Parameters
----------
kernel : string
kernel is either "gaussian", or one of the kernels available in
sklearn.metrics.pairwise.
h : float or array_like
width of kernel. If array, its length must be the number of
dimensions in the training data
Additional keyword arguments are passed to the kernel.
"""
def __init__(self, kernel='gaussian', h=None, **kwargs):
self.kernel = kernel
self.h = h
self.kwargs = kwargs
def fit(self, X, y, dy=1):
self.X = np.asarray(X)
self.y = np.asarray(y)
self.dy = np.atleast_1d(dy)
return self
def predict(self, X):
X = np.asarray(X)
if X.ndim != 2:
raise ValueError('X must be two-dimensional')
if X.shape[1] != self.X.shape[1]:
raise ValueError('dimensions of X do not match training dimension')
if self.kernel == 'gaussian':
# wrangle gaussian into scikit-learn's 'rbf' kernel
h = np.asarray(self.h)
gamma = 0.5 / h / h
K = pairwise_kernels(X, self.X, metric='rbf', gamma=gamma)
else:
K = pairwise_kernels(X, self.X, metric=self.kernel, **self.kwargs)
K /= self.dy ** 2
return (K * self.y).sum(1) / K.sum(1)
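# ----------------------------------------------------------------------
# Hedged usage sketch, not part of the original astroML module: a quick
# demonstration of the estimator defined above on 1-D toy data. The toy
# data and the bandwidth h=0.5 are illustrative assumptions only; because
# of the relative import above, this module is normally imported as part
# of the package rather than executed directly.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = np.sort(rng.uniform(0, 10, 100))[:, None]
    y_demo = np.sin(X_demo).ravel() + 0.1 * rng.randn(100)
    model = NadarayaWatson(kernel='gaussian', h=0.5)
    model.fit(X_demo, y_demo)
    X_query = np.linspace(0, 10, 50)[:, None]
    y_smooth = model.predict(X_query)  # kernel-weighted average of y_demo
    print(y_smooth[:5])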
| bsd-2-clause |
shareactorIO/pipeline | source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/Fundamentals/distributed_cnn.py | 3 | 12868 | import tensorflow as tf
import numpy as np
# Modules required for file download and extraction
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
from scipy import ndimage
outdir = '/tmp/pipeline/datasets/notmist/'
def maybe_download(filename, url, force=False):
"""Download a file if not present."""
if force or not os.path.exists(outdir + filename):
filename, _ = urlretrieve(url + filename, outdir + filename)
print('\nDownload complete for {}'.format(filename))
else:
print('File {} already present.'.format(filename))
print(filename)
return outdir + filename
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('{} already present - don\'t need to extract {}.'.format(root, filename))
else:
print('Extracting data for {}. This may take a while. Please wait.'.format(root))
print(filename)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(root[0:root.rfind('/') + 1])
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
print(data_folders)
return data_folders
# Locations to download data:
url = 'http://yaroslavvb.com/upload/notMNIST/'
# Download two datasets
train_zip_path = maybe_download('notMNIST_small.tar.gz', url)
# Extract datasets
train_folders = maybe_extract(train_zip_path)
image_height = 28 # Pixel height of images
image_width = 28 # Pixel width of images
pixel_depth = 255.0 # Number of levels per pixel
expected_img_shape = (image_height, image_width) # Black and white image, no 3rd dimension
num_labels = len(train_folders)
def load_image_folder(folder):
"""Load the data for a single image label."""
# Create a list of image paths inside the folder
image_files = os.listdir(folder)
# Create empty numpy array to hold data
dataset = np.ndarray(shape=(len(image_files), image_height, image_width),
dtype=np.float32)
num_images = 0 # Counter for number of successful images loaded
for image in image_files:
image_file = os.path.join(folder, image)
try:
# Read in image pixel data as floating point values
image_data = ndimage.imread(image_file).astype(float)
# Scale values: [0.0, 255.0] => [-1.0, 1.0]
image_data = (image_data - pixel_depth / 2) / (pixel_depth / 2)
if image_data.shape != expected_img_shape:
print('File {} has unexpected dimensions: '.format(str(image_data.shape)))
continue
# Add image to the numpy array dataset
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- skipping this file and moving on.')
# Trim dataset to remove unused space
dataset = dataset[0:num_images, :, :]
return dataset
def make_data_label_arrays(num_rows, image_height, image_width):
"""
Creates and returns empty numpy arrays for input data and labels
"""
if num_rows:
dataset = np.ndarray((num_rows, image_height, image_width), dtype=np.float32)
labels = np.ndarray(num_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def collect_datasets(data_folders):
datasets = []
total_images = 0
for label, data_folder in enumerate(data_folders):
# Bring all test folder images in as numpy arrays
dataset = load_image_folder(data_folder)
num_images = len(dataset)
total_images += num_images
datasets.append((dataset, label, num_images))
return datasets, total_images
def merge_train_test_datasets(datasets, total_images, percent_test):
num_train = int(total_images * (1.0 - percent_test))
num_test = total_images - num_train
train_dataset, train_labels = make_data_label_arrays(num_train, image_height, image_width)
test_dataset, test_labels = make_data_label_arrays(num_test, image_height, image_width)
train_counter = 0
test_counter = 0
dataset_counter = 1
for dataset, label, num_images in datasets:
np.random.shuffle(dataset)
if dataset_counter != len(datasets):
n_v = int(num_images * percent_test)
n_t = num_images - n_v
else:
# Last label, make sure dataset sizes match up to what we created
n_v = len(test_dataset) - test_counter
n_t = len(train_dataset) - train_counter
train_dataset[train_counter: train_counter + n_t] = dataset[:n_t]
train_labels[train_counter: train_counter + n_t] = label
test_dataset[test_counter: test_counter + n_v] = dataset[n_t: n_t + n_v]
test_labels[test_counter: test_counter + n_v] = label
train_counter += n_t
test_counter += n_v
dataset_counter += 1
return train_dataset, train_labels, test_dataset, test_labels
train_test_datasets, train_test_total_images = collect_datasets(train_folders)
train_dataset, train_labels, test_dataset, test_labels = \
merge_train_test_datasets(train_test_datasets, train_test_total_images, 0.1)
# Convert data examples into 3-D tensors
num_channels = 1 # grayscale
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, image_height, image_width, num_channels)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def shuffle_data_with_labels(dataset, labels):
indices = list(range(len(dataset)))
np.random.shuffle(indices)
new_data = np.ndarray(dataset.shape, dataset.dtype)
new_labels = np.ndarray(labels.shape, dataset.dtype)
n = 0
for i in indices:
new_data[n] = dataset[i]
new_labels[n] = labels[i]
n += 1
return new_data, new_labels
train_dataset, train_labels = shuffle_data_with_labels(train_dataset, train_labels)
CLUSTER_SPEC= """
{
'ps' : ['tensorflow0.pipeline.io:8888', 'tensorflow1.pipeline.io:8888'],
'worker' : ['tensorflow2.pipeline.io:8888','tensorflow3.pipeline.io:8888'],
}
"""
import ast
cluster_spec = ast.literal_eval(CLUSTER_SPEC)
spec = tf.train.ClusterSpec(cluster_spec)
workers = ['/job:worker/task:{}'.format(i) for i in range(len(cluster_spec['worker']))]
param_servers = ['/job:ps/task:{}'.format(i) for i in range(len(cluster_spec['ps']))]
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True)
graph = tf.Graph()
print_versions = []
with graph.as_default():
for worker in workers:
with tf.device(worker):
version = tf.Print(["active"], ["version"], message="worker is ")
print_versions.append(version)
target = "grpc://tensorflow0.pipeline.io:8888"
with tf.Session(target, graph=graph, config=sess_config) as session:
print(session.run(print_versions))
patch_size = 5
depth = 16
num_hidden = 64
def variable_summaries(var, name):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
tf.scalar_summary('sttdev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def weight_variable(shape, name):
return tf.Variable(tf.truncated_normal(
shape, stddev=0.1), name=name)
def bias_variable(shape, name):
return tf.Variable(tf.constant(0.1, shape=shape), name=name)
def conv2D(data, W, b):
conv = tf.nn.conv2d(data, W, [1, 2, 2, 1], padding='SAME', name="2DConvolution")
return tf.nn.relu(conv + b, name="ReLu")
def fc(data, W, b):
shape = data.get_shape().as_list()
reshape = tf.reshape(data, [-1, shape[1] * shape[2] * shape[3]])
return tf.nn.relu(tf.nn.xw_plus_b(reshape, W, b), name="ReLu")
def model(data):
with tf.name_scope("Layer1"):
activations = conv2D(data, layer1_weights, layer1_biases)
dropped = tf.nn.dropout(activations, 0.5, name="Dropout")
with tf.name_scope("Layer2"):
activations = conv2D(dropped, layer2_weights, layer2_biases)
dropped = tf.nn.dropout(activations, 0.5, name="Dropout")
with tf.name_scope("Layer3"):
activations = fc(dropped, layer3_weights, layer3_biases)
return tf.matmul(activations, layer4_weights) + layer4_biases
graph = tf.Graph()
# divide the input across the cluster:
reduce_loss = []
with graph.as_default():
device_setter = tf.train.replica_device_setter(cluster=cluster_spec)
with tf.device(device_setter):
global_step = tf.Variable(0, name="global_step", trainable=False)
# Input data.
input_data = tf.placeholder(
tf.float32, shape=(None, image_height, image_width, num_channels), name="input_data")
input_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name="input_labels")
layer1_weights = weight_variable([patch_size, patch_size, num_channels, depth], "L1Weights")
layer1_biases = bias_variable([depth], "L1Bias")
layer2_weights = weight_variable([patch_size, patch_size, depth, depth], "L2Weights")
layer2_biases = bias_variable([depth], "L2Bias")
layer3_weights = weight_variable([image_height // 4 * image_width // 4 * depth, num_hidden], "L3Weights")
layer3_biases = bias_variable([num_hidden], "L3Bias")
layer4_weights = weight_variable([num_hidden, num_labels], "L4Weights")
layer4_biases = bias_variable([num_labels], "L4Bias")
splitted = tf.split(0, len(workers), input_data)
label_splitted = tf.split(0, len(workers), input_labels)
# Add variable summaries
for v in [layer1_weights, layer2_weights, layer3_weights, layer4_weights, layer1_biases, layer2_biases, layer3_biases, layer4_biases]:
variable_summaries(v, v.name)
for idx, (portion, worker, label_portion) in enumerate(zip(splitted, workers, label_splitted)):
with tf.device(worker):
# Training computation.
local_reduce = tf.Print(portion, ["portion"], message="portion is")
logits = model(portion)
loss = tf.nn.softmax_cross_entropy_with_logits(logits, label_portion)
loss = tf.Print(loss, [tf.reduce_sum(loss), global_step], message="loss, global_step = ")
reduce_loss.append(loss)
with tf.device(device_setter):
# Optimizer.
mean_loss = tf.reduce_mean(tf.pack(reduce_loss))
optimizer = tf.train.RMSPropOptimizer(0.01).minimize(mean_loss, global_step=global_step)
init = tf.initialize_all_variables()
# Predictions for the training and test data.
model_prediction = tf.nn.softmax(logits, name="prediction")
label_prediction = tf.argmax(model_prediction, 1, name="predicted_label")
with tf.name_scope('summaries'):
tf.scalar_summary('loss', mean_loss)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(label_prediction, tf.argmax(label_portion, 1))
model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', model_accuracy)
merged_summaries = tf.merge_all_summaries()
sv = tf.train.Supervisor(is_chief=True,
graph=graph,
logdir="/tmp/cnn_distributed",
init_op=init,
global_step=global_step)
# Directory to export TensorBoard summary statistics, graph data, etc.
TB_DIR = '/tmp/tensorboard/tf_cnn'
num_steps = 1000
batch_size = 256
with sv.prepare_or_wait_for_session(target, config=sess_config) as session:
writer = tf.train.SummaryWriter(TB_DIR, graph=session.graph)
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {input_data : batch_data, input_labels : batch_labels}
_, l, g_step = session.run(
[optimizer, loss, global_step], feed_dict=feed_dict)
if step % 50 == 0:
print('Minibatch loss at global_step %s: %s' % (g_step, np.mean(l)))
test_dict = {input_data : test_dataset, input_labels : test_labels}
test_accuracy = session.run(model_accuracy, feed_dict=test_dict)
print('Test accuracy: {}'.format(test_accuracy))
writer.close()
| apache-2.0 |
fayf/pyload | module/lib/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
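# ----------------------------------------------------------------------
# Hedged illustration, not part of jinja2: a minimal `NodeVisitor` subclass
# that records every template variable (`Name` node) encountered while
# walking the AST. The class name `_NameCollector` is an assumption made
# for this sketch; typical use would be
# ``_NameCollector().visit(environment.parse(source))``.
class _NameCollector(NodeVisitor):
    """Collect the names of all `Name` nodes found in a template AST."""

    def __init__(self):
        self.names = []

    def visit_Name(self, node, *args, **kwargs):
        # record the variable, then keep descending into its children
        self.names.append(node.name)
        self.generic_visit(node, *args, **kwargs)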
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| gpl-3.0 |
herilalaina/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 67 | 2702 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly from
the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
def true_fun(X):
return np.cos(1.5 * np.pi * X)
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
schets/scikit-learn | examples/mixture/plot_gmm_sin.py | 247 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
pravsripad/mne-python | tutorials/preprocessing/10_preprocessing_overview.py | 13 | 11064 | # -*- coding: utf-8 -*-
"""
.. _tut-artifact-overview:
==============================
Overview of artifact detection
==============================
This tutorial covers the basics of artifact detection, and introduces the
artifact detection tools available in MNE-Python.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`:
"""
# %%
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # just use a fraction of data for speed here
# %%
# What are artifacts?
# ^^^^^^^^^^^^^^^^^^^
#
# Artifacts are parts of the recorded signal that arise from sources other than
# the source of interest (i.e., neuronal activity in the brain). As such,
# artifacts are a form of interference or noise relative to the signal of
# interest. There are many possible causes of such interference, for example:
#
# - Environmental artifacts
# - Persistent oscillations centered around the `AC power line frequency`_
# (typically 50 or 60 Hz)
# - Brief signal jumps due to building vibration (such as a door slamming)
# - Electromagnetic field noise from nearby elevators, cell phones, the
# geomagnetic field, etc.
#
# - Instrumentation artifacts
# - Electromagnetic interference from stimulus presentation (such as EEG
# sensors picking up the field generated by unshielded headphones)
# - Continuous oscillations at specific frequencies used by head position
# indicator (HPI) coils
# - Random high-amplitude fluctuations (or alternatively, constant zero
# signal) in a single channel due to sensor malfunction (e.g., in surface
# electrodes, poor scalp contact)
#
# - Biological artifacts
# - Periodic `QRS`_-like signal patterns (especially in magnetometer
# channels) due to electrical activity of the heart
# - Short step-like deflections (especially in frontal EEG channels) due to
# eye movements
# - Large transient deflections (especially in frontal EEG channels) due to
# blinking
# - Brief bursts of high frequency fluctuations across several channels due
# to the muscular activity during swallowing
#
# There are also some cases where signals from within the brain can be
# considered artifactual. For example, if a researcher is primarily interested
# in the sensory response to a stimulus, but the experimental paradigm involves
# a behavioral response (such as a button press), the neural activity associated
# with planning and executing the button press could be considered an
# artifact relative to the signal of interest (i.e., the evoked sensory response).
#
# .. note::
# Artifacts of the same genesis may appear different in recordings made by
# different EEG or MEG systems, due to differences in sensor design (e.g.,
# passive vs. active EEG electrodes; axial vs. planar gradiometers, etc).
#
#
# What to do about artifacts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# There are 3 basic options when faced with artifacts in your recordings:
#
# 1. *Ignore* the artifact and carry on with analysis
# 2. *Exclude* the corrupted portion of the data and analyze the remaining data
# 3. *Repair* the artifact by suppressing artifactual part of the recording
# while (hopefully) leaving the signal of interest intact
#
# There are many different approaches to repairing artifacts, and MNE-Python
# includes a variety of tools for artifact repair, including digital filtering,
# independent components analysis (ICA), Maxwell filtering / signal-space
# separation (SSS), and signal-space projection (SSP). Separate tutorials
# demonstrate each of these techniques for artifact repair. Many of the
# artifact repair techniques work on both continuous (raw) data and on data
# that has already been epoched (though not necessarily equally well); some can
# be applied to `memory-mapped`_ data while others require the data to be
# copied into RAM. Of course, before you can choose any of these strategies you
# must first *detect* the artifacts, which is the topic of the next section.
#
#
# Artifact detection
# ^^^^^^^^^^^^^^^^^^
#
# MNE-Python includes a few tools for automated detection of certain artifacts
# (such as heartbeats and blinks), but of course you can always visually
# inspect your data to identify and annotate artifacts as well.
#
# We saw in :ref:`the introductory tutorial <tut-overview>` that the example
# data includes :term:`SSP projectors <projector>`, so before we look at
# artifacts let's set aside the projectors in a separate variable and then
# remove them from the :class:`~mne.io.Raw` object using the
# :meth:`~mne.io.Raw.del_proj` method, so that we can inspect our data in its
# original, raw state:
ssp_projectors = raw.info['projs']
raw.del_proj()
# %%
# Low-frequency drifts
# ~~~~~~~~~~~~~~~~~~~~
#
# Low-frequency drifts are most readily detected by visual inspection using the
# basic :meth:`~mne.io.Raw.plot` method, though it is helpful to plot a
# relatively long time span and to disable channel-wise DC shift correction.
# Here we plot 60 seconds and show all the magnetometer channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, n_channels=len(mag_channels),
remove_dc=False)
# %%
# Low-frequency drifts are readily removed by high-pass filtering at a fairly
# low cutoff frequency (the wavelength of the drifts seen above is probably
# around 20 seconds, so in this case a cutoff of 0.1 Hz would probably suppress
# most of the drift).
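# (Hedged sketch, not in the original tutorial: the high-pass filter just
# described could be applied roughly as below; the 0.1 Hz cutoff is the value
# suggested above, and a copy is used so ``raw`` itself is left untouched.)

raw_highpass = raw.copy().filter(l_freq=0.1, h_freq=None)
raw_highpass.plot(duration=60, order=mag_channels, n_channels=len(mag_channels), remove_dc=False)
del raw_highpass

# %%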
#
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line artifacts are easiest to see on plots of the spectrum, so we'll
# use :meth:`~mne.io.Raw.plot_psd` to illustrate.
fig = raw.plot_psd(tmax=np.inf, fmax=250, average=True)
# add some arrows at 60 Hz and its harmonics:
for ax in fig.axes[1:]:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
ax.arrow(x=freqs[idx], y=psds[idx] + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
# %%
# Here we see narrow frequency peaks at 60, 120, 180, and 240 Hz — the power
# line frequency of the USA (where the sample data was recorded) and its 2nd,
# 3rd, and 4th harmonics. Other peaks (around 25 to 30 Hz, and the second
# harmonic of those) are probably related to the heartbeat, which is more
# easily seen in the time domain using a dedicated heartbeat detection function
# as described in the next section.
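# (Hedged aside, not in the original tutorial: such line-frequency peaks are
# commonly suppressed with a notch filter; the call below sketches this on a
# copy of the data, using the 60 Hz harmonics identified above.)

raw_notch = raw.copy().notch_filter(freqs=[60, 120, 180, 240])
raw_notch.plot_psd(tmax=np.inf, fmax=250, average=True)
del raw_notch

# %%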
#
#
# Heartbeat artifacts (ECG)
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# MNE-Python includes a dedicated function
# :func:`~mne.preprocessing.find_ecg_events` in the :mod:`mne.preprocessing`
# submodule, for detecting heartbeat artifacts from either dedicated ECG
# channels or from magnetometers (if no ECG channel is present). Additionally,
# the function :func:`~mne.preprocessing.create_ecg_epochs` will call
# :func:`~mne.preprocessing.find_ecg_events` under the hood, and use the
# resulting events array to extract epochs centered around the detected
# heartbeat artifacts. Here we create those epochs, then show an image plot of
# the detected ECG artifacts along with the average ERF across artifacts. We'll
# show all three channel types, even though EEG channels are less strongly
# affected by heartbeat artifacts:
# sphinx_gallery_thumbnail_number = 4
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
ecg_epochs.plot_image(combine='mean')
# %%
# The horizontal streaks in the magnetometer image plot reflect the fact that
# the heartbeat artifacts are superimposed on low-frequency drifts like the one
# we saw in an earlier section; to avoid this you could pass
# ``baseline=(-0.5, -0.2)`` in the call to
# :func:`~mne.preprocessing.create_ecg_epochs`.
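# (Hedged sketch, not in the original tutorial: requesting that baseline
# correction when extracting the ECG epochs, as suggested above.)

ecg_epochs_baselined = mne.preprocessing.create_ecg_epochs(raw, baseline=(-0.5, -0.2))
ecg_epochs_baselined.plot_image(combine='mean')
del ecg_epochs_baselined

# %%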
# You can also get a quick look at the
# ECG-related field pattern across sensors by averaging the ECG epochs together
# via the :meth:`~mne.Epochs.average` method, and then using the
# :meth:`mne.Evoked.plot_topomap` method:
avg_ecg_epochs = ecg_epochs.average().apply_baseline((-0.5, -0.2))
# %%
# Here again we can visualize the spatial pattern of the associated field at
# various times relative to the peak of the ECG response:
avg_ecg_epochs.plot_topomap(times=np.linspace(-0.05, 0.05, 11))
# %%
# Or, we can get an ERP/F plot with :meth:`~mne.Evoked.plot` or a combined
# scalp field maps and ERP/F plot with :meth:`~mne.Evoked.plot_joint`. Here
# we've specified the times for scalp field maps manually, but if not provided
# they will be chosen automatically based on peaks in the signal:
avg_ecg_epochs.plot_joint(times=[-0.25, -0.025, 0, 0.025, 0.25])
# %%
# Ocular artifacts (EOG)
# ~~~~~~~~~~~~~~~~~~~~~~
#
# Similar to the ECG detection and epoching methods described above, MNE-Python
# also includes functions for detecting and extracting ocular artifacts:
# :func:`~mne.preprocessing.find_eog_events` and
# :func:`~mne.preprocessing.create_eog_epochs`. Once again we'll use the
# higher-level convenience function that automatically finds the artifacts and
# extracts them in to an :class:`~mne.Epochs` object in one step. Unlike the
# heartbeat artifacts seen above, ocular artifacts are usually most prominent
# in the EEG channels, but we'll still show all three channel types. We'll use
# the ``baseline`` parameter this time too; note that there are many fewer
# blinks than heartbeats, which makes the image plots appear somewhat blocky:
eog_epochs = mne.preprocessing.create_eog_epochs(raw, baseline=(-0.5, -0.2))
eog_epochs.plot_image(combine='mean')
eog_epochs.average().plot_joint()
# %%
# Summary
# ^^^^^^^
#
# Familiarizing yourself with typical artifact patterns and magnitudes is a
# crucial first step in assessing the efficacy of later attempts to repair
# those artifacts. A good rule of thumb is that the artifact amplitudes should
# be orders of magnitude larger than your signal of interest — and there should
# be several occurrences of such events — in order to find signal
# decompositions that effectively estimate and repair the artifacts.
#
# Several other tutorials in this section illustrate the various tools for
# artifact repair, and discuss the pros and cons of each technique, for
# example:
#
# - :ref:`tut-artifact-ssp`
# - :ref:`tut-artifact-ica`
# - :ref:`tut-artifact-sss`
#
# There are also tutorials on general-purpose preprocessing steps such as
# :ref:`filtering and resampling <tut-filter-resample>` and :ref:`excluding
# bad channels <tut-bad-channels>` or :ref:`spans of data
# <tut-reject-data-spans>`.
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`QRS`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
| bsd-3-clause |
tersmitten/ansible | lib/ansible/modules/cloud/google/gcp_bigquery_dataset_facts.py | 10 | 7722 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_bigquery_dataset_facts
description:
- Gather facts for GCP Dataset
short_description: Gather facts for GCP Dataset
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options: {}
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a dataset facts"
gcp_bigquery_dataset_facts:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- Dataset name.
returned: success
type: str
access:
description:
- Access controls on the bucket.
returned: success
type: complex
contains:
domain:
description:
- A domain to grant access to. Any users signed in with the domain specified
will be granted the specified access .
returned: success
type: str
groupByEmail:
description:
- An email address of a Google Group to grant access to.
returned: success
type: str
role:
description:
- Describes the rights granted to the user specified by the other member
of the access object .
returned: success
type: str
specialGroup:
description:
- A special group to grant access to.
returned: success
type: str
userByEmail:
description:
- 'An email address of a user to grant access to. For example: fred@example.com
.'
returned: success
type: str
view:
description:
- A view from a different dataset to grant access to. Queries executed against
that view will have read access to tables in this dataset. The role field
is not required when this field is set. If that view is updated by any
user, access to the view needs to be granted again via an update operation.
returned: success
type: complex
contains:
datasetId:
description:
- The ID of the dataset containing this table.
returned: success
type: str
projectId:
description:
- The ID of the project containing this table.
returned: success
type: str
tableId:
description:
- The ID of the table. The ID must contain only letters (a-z, A-Z),
numbers (0-9), or underscores. The maximum length is 1,024 characters.
returned: success
type: str
creationTime:
description:
- The time when this dataset was created, in milliseconds since the epoch.
returned: success
type: int
datasetReference:
description:
- A reference that identifies the dataset.
returned: success
type: complex
contains:
datasetId:
description:
- A unique ID for this dataset, without the project name. The ID must contain
only letters (a-z, A-Z), numbers (0-9), or underscores. The maximum length
is 1,024 characters.
returned: success
type: str
projectId:
description:
- The ID of the project containing this dataset.
returned: success
type: str
defaultTableExpirationMs:
description:
- The default lifetime of all tables in the dataset, in milliseconds .
returned: success
type: int
description:
description:
- A user-friendly description of the dataset.
returned: success
type: str
friendlyName:
description:
- A descriptive name for the dataset.
returned: success
type: str
id:
description:
- The fully-qualified unique name of the dataset in the format projectId:datasetId.
The dataset name without the project name is given in the datasetId field
.
returned: success
type: str
labels:
description:
- The labels associated with this dataset. You can use these to organize and
group your datasets .
returned: success
type: dict
lastModifiedTime:
description:
- The date when this dataset or any of its tables was last modified, in milliseconds
since the epoch.
returned: success
type: int
location:
description:
- The geographic location where the dataset should reside. Possible values include
EU and US. The default value is US.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict())
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery']
items = fetch_list(module, collection(module))
if items.get('datasets'):
items = items.get('datasets')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'bigquery')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 |
xiaolonw/fast-rcnn-normal | tools/reval.py | 50 | 2749 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Reval = re-eval. Re-evaluate saved detections."""
import _init_paths
from fast_rcnn.test import apply_nms
from fast_rcnn.config import cfg
from datasets.factory import get_imdb
import cPickle
import os, sys, argparse
import numpy as np
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Re-evaluate results')
parser.add_argument('output_dir', nargs=1, help='results directory',
type=str)
parser.add_argument('--rerun', dest='rerun',
help=('re-run evaluation code '
'(otherwise: results are loaded from file)'),
action='store_true')
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to re-evaluate',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def from_mats(imdb_name, output_dir):
import scipy.io as sio
imdb = get_imdb(imdb_name)
aps = []
for i, cls in enumerate(imdb.classes[1:]):
mat = sio.loadmat(os.path.join(output_dir, cls + '_pr.mat'))
ap = mat['ap'][0, 0] * 100
apAuC = mat['ap_auc'][0, 0] * 100
print '!!! {} : {:.1f} {:.1f}'.format(cls, ap, apAuC)
aps.append(ap)
print '~~~~~~~~~~~~~~~~~~~'
print 'Results (from mat files):'
for ap in aps:
print '{:.1f}'.format(ap)
print '{:.1f}'.format(np.array(aps).mean())
print '~~~~~~~~~~~~~~~~~~~'
def from_dets(imdb_name, output_dir, comp_mode):
imdb = get_imdb(imdb_name)
imdb.competition_mode(comp_mode)
with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
dets = cPickle.load(f)
print 'Applying NMS to all detections'
nms_dets = apply_nms(dets, cfg.TEST.NMS)
print 'Evaluating detections'
imdb.evaluate_detections(nms_dets, output_dir)
if __name__ == '__main__':
args = parse_args()
output_dir = os.path.abspath(args.output_dir[0])
imdb_name = args.imdb_name
if args.comp_mode and not args.rerun:
raise ValueError('--rerun must be used with --comp')
if args.rerun:
from_dets(imdb_name, output_dir, args.comp_mode)
else:
from_mats(imdb_name, output_dir)
| mit |
CCI-Tools/sandbox | notebooks/Marco/open_ESACCI-AEROSOL-AATSR-MOTNHLY.py | 1 | 3160 | from datetime import datetime
from mz_common import extract_time_index, timeseries, subset, ect_open_mfdataset
import xarray as xr
import pandas as pd
'''
"*-ESACCI-L3C_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ADV_MOTNHLY-v2.30.nc"
These products have only two dimensions, 'latitude' and 'longitude'.
This script builds a dataset with a third dimension, 'time'.
The time information is extracted from the global attributes
'time_coverage_start' and 'time_coverage_end'.
'''
DIR = "/hdd/home/marcoz/EOData/ccitbx/aerosol_all_monthly"
FILE_GLOB = "*-ESACCI-L3C_AEROSOL-AER_PRODUCTS-AATSR-ENVISAT-ADV_MOTNHLY-v2.30.nc"
file_paths = "%s/%s" % (DIR, FILE_GLOB)
def combine(datasets):
time_index = [extract_time_index(ds) for ds in datasets]
return xr.concat(datasets, pd.Index(time_index, name='time'))
print("===================================================")
print("using xarray")
ds = ect_open_mfdataset(file_paths, combine=combine)
time_series = timeseries(ds['AOD550_mean'], lat=0, lon=0)
sub = subset(ds,
lat_min=30., lat_max=45.,
lon_min=-60., lon_max=-45.,
time_min=datetime(2003, 1, 1), time_max=datetime(2004, 1, 1),
)
ds.close()
print("===================================================")
print("using xarray + dask")
ds = ect_open_mfdataset(file_paths, chunks={'latitude': 180, 'longitude': 360}, combine=combine)
time_series = timeseries(ds['AOD550_mean'], lat=0, lon=0)
sub = subset(ds,
lat_min=30., lat_max=45.,
lon_min=-60., lon_max=-45.,
time_min=datetime(2003, 1, 1), time_max=datetime(2004, 1, 1),
)
ds.close()
# print(ds)
'''
===================================================
using xarray
num datasets: 116
TIME for open : 0:00:00.916795
TIME for combine : 0:00:02.849429
===================================================
using xarray + dask
num datasets: 116
TIME for open : 0:00:01.523071
TIME for combine : 0:00:02.081897
===================================================
dimensions: Frozen(SortedKeysDict({'time': 116, 'longitude': 360, 'latitude': 180}))
===================================================
time series (lat/lon)
===================================================
TIME for time_series: 0:00:00.005606
<xarray.DataArray 'AOD550_mean' (time: 116)>
dask.array<getitem..., shape=(116,), dtype=float64, chunksize=(1,)>
Coordinates:
latitude float32 0.5
longitude float32 0.5
* time (time) datetime64[ns] 2002-08-31T23:59:59 2002-09-30T23:59:59 ...
Attributes:
long_name: mean aerosol optical density at 550 nm
standard_name: atmosphere_optical_thickness_due_to_ambient_aerosol
units: 1
valid_range: [ 0. 4.]
TIME for ts_load : 0:00:00.394161
===================================================
subset (lat/lon/time)
===================================================
TIME for subset : 0:00:00.014428
TIME for subset load : 0:00:00.704024
dimensions: Frozen(SortedKeysDict({'time': 12, 'longitude': 15, 'latitude': 15}))
===================================================
'''
| gpl-3.0 |
EderSantana/fuel | tests/test_serialization.py | 3 | 1400 | import os
import tempfile
import numpy
from six.moves import cPickle
from fuel.streams import DataStream
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from tests import skip_if_not_available
def test_in_memory():
skip_if_not_available(datasets=['mnist.hdf5'])
# Load MNIST and get two batches
mnist = MNIST('train', load_in_memory=True)
data_stream = DataStream(mnist, iteration_scheme=SequentialScheme(
examples=mnist.num_examples, batch_size=256))
epoch = data_stream.get_epoch_iterator()
for i, (features, targets) in enumerate(epoch):
if i == 1:
break
handle = mnist.open()
known_features, _ = mnist.get_data(handle, slice(256, 512))
mnist.close(handle)
assert numpy.all(features == known_features)
# Pickle the epoch and make sure that the data wasn't dumped
with tempfile.NamedTemporaryFile(delete=False) as f:
filename = f.name
cPickle.dump(epoch, f)
assert os.path.getsize(filename) < 1024 * 1024 # Less than 1MB
# Reload the epoch and make sure that the state was maintained
del epoch
with open(filename, 'rb') as f:
epoch = cPickle.load(f)
features, targets = next(epoch)
handle = mnist.open()
known_features, _ = mnist.get_data(handle, slice(512, 768))
mnist.close(handle)
assert numpy.all(features == known_features)
| mit |
shareactorIO/pipeline | source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/SkFlow_DEPRECATED/resnet.py | 6 | 6093 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example builds a deep residual network for MNIST data.
Reference Paper: http://arxiv.org/pdf/1512.03385.pdf
Note that this is still a work-in-progress. Feel free to submit a PR
to make this better.
"""
import os
from sklearn import metrics
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import skflow
from collections import namedtuple
from math import sqrt
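# Orientation note (added for illustration, not from the original authors):
# each bottleneck block below computes a residual function F(x) as a 1x1
# "reduce" convolution, a 3x3 convolution and a 1x1 "restore" convolution, and
# the identity shortcut then adds the block input back in:
#
#     net = F(net) + net
#
# which is what the ``net = conv + net`` line inside ``res_net`` implements.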
def res_net(x, y, activation=tf.nn.relu):
"""Builds a residual network. Note that if the input tensor is 2D, it must be
square in order to be converted to a 4D tensor.
Borrowed structure from here: https://github.com/pkmital/tensorflow_tutorials/blob/master/10_residual_network.py
Args:
x: Input of the network
y: Output of the network
activation: Activation function to apply after each convolution
"""
# Configurations for each bottleneck block
BottleneckBlock = namedtuple(
'BottleneckBlock', ['num_layers', 'num_filters', 'bottleneck_size'])
blocks = [BottleneckBlock(3, 128, 32),
BottleneckBlock(3, 256, 64),
BottleneckBlock(3, 512, 128),
BottleneckBlock(3, 1024, 256)]
input_shape = x.get_shape().as_list()
# Reshape the input into the right shape if it's a 2D tensor
if len(input_shape) == 2:
ndim = int(sqrt(input_shape[1]))
x = tf.reshape(x, [-1, ndim, ndim, 1])
# First convolution expands to 64 channels
with tf.variable_scope('conv_layer1'):
net = skflow.ops.conv2d(x, 64, [7, 7], batch_norm=True,
activation=activation, bias=False)
# Max pool
net = tf.nn.max_pool(
net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# First chain of resnets
with tf.variable_scope('conv_layer2'):
net = skflow.ops.conv2d(net, blocks[0].num_filters,
[1, 1], [1, 1, 1, 1],
padding='VALID', bias=True)
# Create each bottleneck building block for each layer
for block_i, block in enumerate(blocks):
for layer_i in range(block.num_layers):
name = 'block_%d/layer_%d' % (block_i, layer_i)
# 1x1 convolution responsible for reducing dimension
with tf.variable_scope(name + '/conv_in'):
conv = skflow.ops.conv2d(net, block.num_filters,
[1, 1], [1, 1, 1, 1],
padding='VALID',
activation=activation,
batch_norm=True,
bias=False)
with tf.variable_scope(name + '/conv_bottleneck'):
conv = skflow.ops.conv2d(conv, block.bottleneck_size,
[3, 3], [1, 1, 1, 1],
padding='SAME',
activation=activation,
batch_norm=True,
bias=False)
# 1x1 convolution responsible for restoring dimension
with tf.variable_scope(name + '/conv_out'):
conv = skflow.ops.conv2d(conv, block.num_filters,
[1, 1], [1, 1, 1, 1],
padding='VALID',
activation=activation,
batch_norm=True,
bias=False)
# shortcut connections that turn the network into its counterpart
# residual function (identity shortcut)
net = conv + net
try:
# upscale to the next block size
next_block = blocks[block_i + 1]
with tf.variable_scope('block_%d/conv_upscale' % block_i):
net = skflow.ops.conv2d(net, next_block.num_filters,
[1, 1], [1, 1, 1, 1],
bias=False,
padding='SAME')
except IndexError:
pass
net_shape = net.get_shape().as_list()
net = tf.nn.avg_pool(net,
ksize=[1, net_shape[1], net_shape[2], 1],
strides=[1, 1, 1, 1], padding='VALID')
net_shape = net.get_shape().as_list()
net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])
return skflow.models.logistic_regression(net, y)
# Download and load MNIST data.
mnist = input_data.read_data_sets('MNIST_data')
# Restore model if graph is saved into a folder.
if os.path.exists("models/resnet/graph.pbtxt"):
classifier = skflow.TensorFlowEstimator.restore("models/resnet/")
else:
# Create a new resnet classifier.
classifier = skflow.TensorFlowEstimator(
model_fn=res_net, n_classes=10, batch_size=100, steps=100,
learning_rate=0.001, continue_training=True)
while True:
# Train model and save summaries into logdir.
classifier.fit(mnist.train.images, mnist.train.labels, logdir="models/resnet/")
# Calculate accuracy.
score = metrics.accuracy_score(
mnist.test.labels, classifier.predict(mnist.test.images, batch_size=64))
print('Accuracy: {0:f}'.format(score))
# Save model graph and checkpoints.
classifier.save("models/resnet/")
| apache-2.0 |
marmarko/ml101 | tensorflow/examples/skflow/iris_custom_decay_dnn.py | 56 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
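# Note on the schedule above: with these parameters tf.train.exponential_decay
# computes
#
#     lr(step) = 0.1 * 0.001 ** (step / 100.0)
#
# (staircase is left at its default False, so the exponent is fractional).
# For example lr(0) = 0.1, lr(100) = 1e-4 and lr(200) = 1e-7, i.e. a very
# aggressive decay over the 800 training steps used below.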
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| bsd-2-clause |
pravsripad/mne-python | mne/decoding/tests/test_receptive_field.py | 11 | 22701 | # Authors: Chris Holdgraf <choldgraf@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import pytest
import numpy as np
from numpy import einsum
from numpy.fft import rfft, irfft
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.utils import requires_sklearn
from mne.decoding import ReceptiveField, TimeDelayingRidge
from mne.decoding.receptive_field import (_delay_time_series, _SCORERS,
_times_to_delays, _delays_to_slice)
from mne.decoding.time_delaying_ridge import (_compute_reg_neighbors,
_compute_corrs)
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.1, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Loading raw data
n_jobs_test = (1, 'cuda')
def test_compute_reg_neighbors():
"""Test fast calculation of laplacian regularizer."""
for reg_type in (
('ridge', 'ridge'),
('ridge', 'laplacian'),
('laplacian', 'ridge'),
('laplacian', 'laplacian')):
for n_ch_x, n_delays in (
(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (4, 1),
(2, 2), (2, 3), (3, 2), (3, 3),
(2, 4), (4, 2), (3, 4), (4, 3), (4, 4),
(5, 4), (4, 5), (5, 5),
(20, 9), (9, 20)):
for normed in (True, False):
reg_direct = _compute_reg_neighbors(
n_ch_x, n_delays, reg_type, 'direct', normed=normed)
reg_csgraph = _compute_reg_neighbors(
n_ch_x, n_delays, reg_type, 'csgraph', normed=normed)
assert_allclose(
reg_direct, reg_csgraph, atol=1e-7,
err_msg='%s: %s' % (reg_type, (n_ch_x, n_delays)))
@requires_sklearn
def test_rank_deficiency():
"""Test signals that are rank deficient."""
# See GH#4253
from sklearn.linear_model import Ridge
N = 256
fs = 1.
tmin, tmax = -50, 100
reg = 0.1
rng = np.random.RandomState(0)
eeg = rng.randn(N, 1)
eeg *= 100
eeg = rfft(eeg, axis=0)
eeg[N // 4:] = 0 # rank-deficient lowpass
eeg = irfft(eeg, axis=0)
win = np.hanning(N // 8)
win /= win.mean()
y = np.apply_along_axis(np.convolve, 0, eeg, win, mode='same')
y += rng.randn(*y.shape) * 100
for est in (Ridge(reg), reg):
rf = ReceptiveField(tmin, tmax, fs, estimator=est, patterns=True)
rf.fit(eeg, y)
pred = rf.predict(eeg)
assert_equal(y.shape, pred.shape)
corr = np.corrcoef(y.ravel(), pred.ravel())[0, 1]
assert corr > 0.995
def test_time_delay():
"""Test that time-delaying w/ times and samples works properly."""
# Explicit delays + sfreq
X = np.random.RandomState(0).randn(1000, 2)
assert (X == 0).sum() == 0 # need this for later
test_tlims = [
((1, 2), 1),
((1, 1), 1),
((0, 2), 1),
((0, 1), 1),
((0, 0), 1),
((-1, 2), 1),
((-1, 1), 1),
((-1, 0), 1),
((-1, -1), 1),
((-2, 2), 1),
((-2, 1), 1),
((-2, 0), 1),
((-2, -1), 1),
((-2, -1), 1),
((0, .2), 10),
((-.1, .1), 10)]
for (tmin, tmax), isfreq in test_tlims:
# sfreq must be int/float
with pytest.raises(TypeError, match='`sfreq` must be an instance of'):
_delay_time_series(X, tmin, tmax, sfreq=[1])
# Delays must be int/float
with pytest.raises(TypeError, match='.*complex.*'):
_delay_time_series(X, np.complex128(tmin), tmax, 1)
# Make sure swapaxes works
start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1
n_delays = stop - start
X_delayed = _delay_time_series(X, tmin, tmax, isfreq)
assert_equal(X_delayed.shape, (1000, 2, n_delays))
# Make sure delay slice is correct
delays = _times_to_delays(tmin, tmax, isfreq)
assert_array_equal(delays, np.arange(start, stop))
keep = _delays_to_slice(delays)
expected = np.where((X_delayed != 0).all(-1).all(-1))[0]
got = np.arange(len(X_delayed))[keep]
assert_array_equal(got, expected)
assert X_delayed[keep].shape[-1] > 0
assert (X_delayed[keep] == 0).sum() == 0
del_zero = int(round(-tmin * isfreq))
for ii in range(-2, 3):
idx = del_zero + ii
err_msg = '[%s,%s] (%s): %s %s' % (tmin, tmax, isfreq, ii, idx)
if 0 <= idx < X_delayed.shape[-1]:
if ii == 0:
assert_array_equal(X_delayed[:, :, idx], X,
err_msg=err_msg)
elif ii < 0: # negative delay
assert_array_equal(X_delayed[:ii, :, idx], X[-ii:, :],
err_msg=err_msg)
assert_array_equal(X_delayed[ii:, :, idx], 0.)
else:
assert_array_equal(X_delayed[ii:, :, idx], X[:-ii, :],
err_msg=err_msg)
assert_array_equal(X_delayed[:ii, :, idx], 0.)
@pytest.mark.slowtest # slow on Azure
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_basic(n_jobs):
"""Test model prep and fitting."""
from sklearn.linear_model import Ridge
# Make sure estimator pulling works
mod = Ridge()
rng = np.random.RandomState(1337)
# Test the receptive field model
# Define parameters for the model and simulate inputs + weights
tmin, tmax = -10., 0
n_feats = 3
rng = np.random.RandomState(0)
X = rng.randn(10000, n_feats)
w = rng.randn(int((tmax - tmin) + 1) * n_feats)
# Delay inputs and cut off first 4 values since they'll be cut in the fit
X_del = np.concatenate(
_delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1)
y = np.dot(X_del, w)
# Fit the model and test values
feature_names = ['feature_%i' % ii for ii in [0, 1, 2]]
rf = ReceptiveField(tmin, tmax, 1, feature_names, estimator=mod,
patterns=True)
rf.fit(X, y)
assert_array_equal(rf.delays_, np.arange(tmin, tmax + 1))
y_pred = rf.predict(X)
assert_allclose(y[rf.valid_samples_], y_pred[rf.valid_samples_], atol=1e-2)
scores = rf.score(X, y)
assert scores > .99
assert_allclose(rf.coef_.T.ravel(), w, atol=1e-3)
# Make sure different input shapes work
rf.fit(X[:, np.newaxis:], y[:, np.newaxis])
rf.fit(X, y[:, np.newaxis])
with pytest.raises(ValueError, match='If X has 3 .* y must have 2 or 3'):
rf.fit(X[..., np.newaxis], y)
with pytest.raises(ValueError, match='X must be shape'):
rf.fit(X[:, 0], y)
with pytest.raises(ValueError, match='X and y do not have the same n_epo'):
rf.fit(X[:, np.newaxis], np.tile(y[:, np.newaxis, np.newaxis],
[1, 2, 1]))
with pytest.raises(ValueError, match='X and y do not have the same n_tim'):
rf.fit(X, y[:-2])
with pytest.raises(ValueError, match='n_features in X does not match'):
rf.fit(X[:, :1], y)
# auto-naming features
feature_names = ['feature_%s' % ii for ii in [0, 1, 2]]
rf = ReceptiveField(tmin, tmax, 1, estimator=mod,
feature_names=feature_names)
assert_equal(rf.feature_names, feature_names)
rf = ReceptiveField(tmin, tmax, 1, estimator=mod)
rf.fit(X, y)
assert_equal(rf.feature_names, None)
# Float becomes ridge
rf = ReceptiveField(tmin, tmax, 1, ['one', 'two', 'three'], estimator=0)
str(rf) # repr works before fit
rf.fit(X, y)
assert isinstance(rf.estimator_, TimeDelayingRidge)
str(rf) # repr works after fit
rf = ReceptiveField(tmin, tmax, 1, ['one'], estimator=0)
rf.fit(X[:, [0]], y)
str(rf) # repr with one feature
# Should only accept estimators or floats
with pytest.raises(ValueError, match='`estimator` must be a float or'):
ReceptiveField(tmin, tmax, 1, estimator='foo').fit(X, y)
with pytest.raises(ValueError, match='`estimator` must be a float or'):
ReceptiveField(tmin, tmax, 1, estimator=np.array([1, 2, 3])).fit(X, y)
with pytest.raises(ValueError, match='tmin .* must be at most tmax'):
ReceptiveField(5, 4, 1).fit(X, y)
# scorers
for key, val in _SCORERS.items():
rf = ReceptiveField(tmin, tmax, 1, ['one'],
estimator=0, scoring=key, patterns=True)
rf.fit(X[:, [0]], y)
y_pred = rf.predict(X[:, [0]]).T.ravel()[:, np.newaxis]
assert_allclose(val(y[:, np.newaxis], y_pred,
multioutput='raw_values'),
rf.score(X[:, [0]], y), rtol=1e-2)
with pytest.raises(ValueError, match='inputs must be shape'):
_SCORERS['corrcoef'](y.ravel(), y_pred, multioutput='raw_values')
# Need correct scorers
with pytest.raises(ValueError, match='scoring must be one of'):
ReceptiveField(tmin, tmax, 1., scoring='foo').fit(X, y)
@pytest.mark.parametrize('n_jobs', n_jobs_test)
def test_time_delaying_fast_calc(n_jobs):
"""Test time delaying and fast calculations."""
X = np.array([[1, 2, 3], [5, 7, 11]]).T
# all negative
smin, smax = 1, 2
X_del = _delay_time_series(X, smin, smax, 1.)
# (n_times, n_features, n_delays) -> (n_times, n_features * n_delays)
X_del.shape = (X.shape[0], -1)
expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[5, 2, 19, 10], [2, 1, 7, 5], [19, 7, 74, 35], [10, 5, 35, 25]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# all positive
smin, smax = -2, -1
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[9, 6, 33, 21], [6, 13, 22, 47],
[33, 22, 121, 77], [21, 47, 77, 170]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# both sides
smin, smax = -1, 1
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[2, 3, 0], [1, 2, 3], [0, 1, 2],
[7, 11, 0], [5, 7, 11], [0, 5, 7]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[13, 8, 3, 47, 31, 15],
[8, 14, 8, 29, 52, 31],
[3, 8, 5, 11, 29, 19],
[47, 29, 11, 170, 112, 55],
[31, 52, 29, 112, 195, 112],
[15, 31, 19, 55, 112, 74]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# slightly harder to get the non-Toeplitz correction correct
X = np.array([[1, 2, 3, 5]]).T
smin, smax = 0, 3
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3],
[0, 0, 1, 2], [0, 0, 0, 1]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = [[39, 23, 13, 5], [23, 14, 8, 3], [13, 8, 5, 2], [5, 3, 2, 1]]
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# even worse
X = np.array([[1, 2, 3], [5, 7, 11]]).T
smin, smax = 0, 2
X_del = _delay_time_series(X, smin, smax, 1.)
X_del.shape = (X.shape[0], -1)
expected = np.array([[1, 2, 3], [0, 1, 2], [0, 0, 1],
[5, 7, 11], [0, 5, 7], [0, 0, 5]]).T
assert_allclose(X_del, expected)
Xt_X = np.dot(X_del.T, X_del)
expected = np.array([[14, 8, 3, 52, 31, 15],
[8, 5, 2, 29, 19, 10],
[3, 2, 1, 11, 7, 5],
[52, 29, 11, 195, 112, 55],
[31, 19, 7, 112, 74, 35],
[15, 10, 5, 55, 35, 25]])
assert_allclose(Xt_X, expected)
x_xt = _compute_corrs(X, np.zeros((X.shape[0], 1)), smin, smax + 1)[0]
assert_allclose(x_xt, expected)
# And a bunch of random ones for good measure
rng = np.random.RandomState(0)
X = rng.randn(25, 3)
y = np.empty((25, 2))
vals = (0, -1, 1, -2, 2, -11, 11)
for smax in vals:
for smin in vals:
if smin > smax:
continue
for ii in range(X.shape[1]):
kernel = rng.randn(smax - smin + 1)
kernel -= np.mean(kernel)
y[:, ii % y.shape[-1]] = np.convolve(X[:, ii], kernel, 'same')
x_xt, x_yt, n_ch_x, _, _ = _compute_corrs(X, y, smin, smax + 1)
X_del = _delay_time_series(X, smin, smax, 1., fill_mean=False)
x_yt_true = einsum('tfd,to->ofd', X_del, y)
x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T
assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax))
X_del.shape = (X.shape[0], -1)
x_xt_true = np.dot(X_del.T, X_del).T
assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax))
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_1d(n_jobs):
"""Test that the fast solving works like Ridge."""
from sklearn.linear_model import Ridge
rng = np.random.RandomState(0)
x = rng.randn(500, 1)
for delay in range(-2, 3):
y = np.zeros(500)
slims = [(-2, 4)]
if delay == 0:
y[:] = x[:, 0]
elif delay < 0:
y[:delay] = x[-delay:, 0]
slims += [(-4, -1)]
else:
y[delay:] = x[:-delay, 0]
slims += [(1, 2)]
for ndim in (1, 2):
y.shape = (y.shape[0],) + (1,) * (ndim - 1)
for slim in slims:
smin, smax = slim
lap = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian',
fit_intercept=False, n_jobs=n_jobs)
for estimator in (Ridge(alpha=0.), Ridge(alpha=0.1), 0., 0.1,
lap):
for offset in (-100, 0, 100):
model = ReceptiveField(smin, smax, 1.,
estimator=estimator,
n_jobs=n_jobs)
use_x = x + offset
model.fit(use_x, y)
if estimator is lap:
continue # these checks are too stringent
assert_allclose(model.estimator_.intercept_, -offset,
atol=1e-1)
assert_array_equal(model.delays_,
np.arange(smin, smax + 1))
expected = (model.delays_ == delay).astype(float)
expected = expected[np.newaxis] # features
if y.ndim == 2:
expected = expected[np.newaxis] # outputs
assert_equal(model.coef_.ndim, ndim + 1)
assert_allclose(model.coef_, expected, atol=1e-3)
start = model.valid_samples_.start or 0
stop = len(use_x) - (model.valid_samples_.stop or 0)
assert stop - start >= 495
assert_allclose(
model.predict(use_x)[model.valid_samples_],
y[model.valid_samples_], atol=1e-2)
score = np.mean(model.score(use_x, y))
assert score > 0.9999
@pytest.mark.parametrize('n_jobs', n_jobs_test)
@requires_sklearn
def test_receptive_field_nd(n_jobs):
"""Test multidimensional support."""
from sklearn.linear_model import Ridge
# multidimensional
rng = np.random.RandomState(3)
x = rng.randn(1000, 3)
y = np.zeros((1000, 2))
smin, smax = 0, 5
# This is a weird assignment, but it's just a way to distribute some
# unique values at various delays, and "expected" explains how they
# should appear in the resulting RF
for ii in range(1, 5):
y[ii:, ii % 2] += (-1) ** ii * ii * x[:-ii, ii % 3]
y -= np.mean(y, axis=0)
x -= np.mean(x, axis=0)
x_off = x + 1e3
expected = [
[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4, 0],
[0, 0, 2, 0, 0, 0]],
[[0, 0, 0, -3, 0, 0],
[0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
]
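# Worked example (added for illustration): unpacking the assignment loop above
# for ii == 3 gives
#     y[3:, 1] += (-1)**3 * 3 * x[:-3, 0]
# i.e. output 1 depends on feature 0 at delay 3 with weight -3, which is the
# expected[1][0][3] == -3 entry above; the 2, 4 and -1 entries come from
# ii == 2, 4 and 1 in the same way.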
tdr_l = TimeDelayingRidge(smin, smax, 1., 0.1, 'laplacian', n_jobs=n_jobs)
tdr_nc = TimeDelayingRidge(smin, smax, 1., 0.1, n_jobs=n_jobs,
edge_correction=False)
for estimator, atol in zip((Ridge(alpha=0.), 0., 0.01, tdr_l, tdr_nc),
(1e-3, 1e-3, 1e-3, 5e-3, 5e-2)):
model = ReceptiveField(smin, smax, 1.,
estimator=estimator)
model.fit(x, y)
assert_array_equal(model.delays_,
np.arange(smin, smax + 1))
assert_allclose(model.coef_, expected, atol=atol)
tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type='foo',
n_jobs=n_jobs)
model = ReceptiveField(smin, smax, 1., estimator=tdr)
with pytest.raises(ValueError, match='reg_type entries must be one of'):
model.fit(x, y)
tdr = TimeDelayingRidge(smin, smax, 1., 0.01, reg_type=['laplacian'],
n_jobs=n_jobs)
model = ReceptiveField(smin, smax, 1., estimator=tdr)
with pytest.raises(ValueError, match='reg_type must have two elements'):
model.fit(x, y)
model = ReceptiveField(smin, smax, 1, estimator=tdr, fit_intercept=False)
with pytest.raises(ValueError, match='fit_intercept'):
model.fit(x, y)
# Now check the intercept_
tdr = TimeDelayingRidge(smin, smax, 1., 0., n_jobs=n_jobs)
tdr_no = TimeDelayingRidge(smin, smax, 1., 0., fit_intercept=False,
n_jobs=n_jobs)
for estimator in (Ridge(alpha=0.), tdr,
Ridge(alpha=0., fit_intercept=False), tdr_no):
# first with no intercept in the data
model = ReceptiveField(smin, smax, 1., estimator=estimator)
model.fit(x, y)
assert_allclose(model.estimator_.intercept_, 0., atol=1e-7,
err_msg=repr(estimator))
assert_allclose(model.coef_, expected, atol=1e-3,
err_msg=repr(estimator))
y_pred = model.predict(x)
assert_allclose(y_pred[model.valid_samples_],
y[model.valid_samples_],
atol=1e-2, err_msg=repr(estimator))
score = np.mean(model.score(x, y))
assert score > 0.9999
# now with an intercept in the data
model.fit(x_off, y)
if estimator.fit_intercept:
val = [-6000, 4000]
itol = 0.5
ctol = 5e-4
else:
val = itol = 0.
ctol = 2.
assert_allclose(model.estimator_.intercept_, val, atol=itol,
err_msg=repr(estimator))
assert_allclose(model.coef_, expected, atol=ctol, rtol=ctol,
err_msg=repr(estimator))
if estimator.fit_intercept:
ptol = 1e-2
stol = 0.999999
else:
ptol = 10
stol = 0.6
y_pred = model.predict(x_off)[model.valid_samples_]
assert_allclose(y_pred, y[model.valid_samples_],
atol=ptol, err_msg=repr(estimator))
score = np.mean(model.score(x_off, y))
assert score > stol, estimator
model = ReceptiveField(smin, smax, 1., fit_intercept=False)
model.fit(x_off, y)
assert_allclose(model.estimator_.intercept_, 0., atol=1e-7)
score = np.mean(model.score(x_off, y))
assert score > 0.6
def _make_data(n_feats, n_targets, n_samples, tmin, tmax):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_feats)
w = rng.randn(int((tmax - tmin) + 1) * n_feats, n_targets)
# Delay inputs
X_del = np.concatenate(
_delay_time_series(X, tmin, tmax, 1.).transpose(2, 0, 1), axis=1)
y = np.dot(X_del, w)
return X, y
@requires_sklearn
def test_inverse_coef():
"""Test inverse coefficients computation."""
from sklearn.linear_model import Ridge
tmin, tmax = 0., 10.
n_feats, n_targets, n_samples = 3, 2, 1000
n_delays = int((tmax - tmin) + 1)
# Check coefficient dims, for all estimator types
X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
tdr = TimeDelayingRidge(tmin, tmax, 1., 0.1, 'laplacian')
for estimator in (0., 0.01, Ridge(alpha=0.), tdr):
rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
patterns=True)
rf.fit(X, y)
inv_rf = ReceptiveField(tmin, tmax, 1., estimator=estimator,
patterns=True)
inv_rf.fit(y, X)
assert_array_equal(rf.coef_.shape, rf.patterns_.shape,
(n_targets, n_feats, n_delays))
assert_array_equal(inv_rf.coef_.shape, inv_rf.patterns_.shape,
(n_feats, n_targets, n_delays))
# we should have np.dot(patterns.T,coef) ~ np.eye(n)
c0 = rf.coef_.reshape(n_targets, n_feats * n_delays)
c1 = rf.patterns_.reshape(n_targets, n_feats * n_delays)
assert_allclose(np.dot(c0, c1.T), np.eye(c0.shape[0]), atol=0.2)
@requires_sklearn
def test_linalg_warning():
"""Test that warnings are issued when no regularization is applied."""
from sklearn.linear_model import Ridge
n_feats, n_targets, n_samples = 5, 60, 50
X, y = _make_data(n_feats, n_targets, n_samples, tmin, tmax)
for estimator in (0., Ridge(alpha=0.)):
rf = ReceptiveField(tmin, tmax, 1., estimator=estimator)
with pytest.warns((RuntimeWarning, UserWarning),
match='[Singular|scipy.linalg.solve]'):
rf.fit(y, X)
| bsd-3-clause |
pytroll/pyresample | pyresample/future/resamplers/resampler.py | 1 | 9727 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2021 Pyresample developers
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Base resampler class made for subclassing."""
from __future__ import annotations
import abc
import hashlib
import json
import logging
from typing import Optional, Union
try:
import xarray as xr
except ImportError:
xr = None
from pyresample.geometry import AreaDefinition, CoordinateDefinition
logger = logging.getLogger(__name__)
HashType = hashlib._hashlib.HASH
def hash_dict(the_dict: dict, existing_hash: Optional[HashType] = None) -> HashType:
"""Calculate a hash for a dictionary and optionally update an existing hash."""
if existing_hash is None:
existing_hash = hashlib.sha1()
existing_hash.update(json.dumps(the_dict, sort_keys=True).encode('utf-8'))
return existing_hash
def hash_resampler_geometries(source_geo_def, target_geo_def, **kwargs) -> str:
"""Get hash for the geometries used by a resampler with extra *kwargs*."""
resampler_hash = source_geo_def.update_hash()
resampler_hash = target_geo_def.update_hash(resampler_hash)
resampler_hash = hash_dict(kwargs, resampler_hash)
return resampler_hash.hexdigest()
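# Minimal usage sketch (added for illustration; the geometry values are made
# up, not taken from any real configuration):
#
#     area = AreaDefinition('test', 'test', 'test', {'proj': 'latlong'},
#                           100, 100, (-180.0, -90.0, 180.0, 90.0))
#     key = hash_resampler_geometries(area, area, radius_of_influence=50000)
#
# ``key`` is a hex digest that changes whenever either geometry or any keyword
# argument changes, which is what makes it suitable as a cache key.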
def _data_arr_needs_xy_coords(data_arr, area):
coords_exist = 'x' in data_arr.coords and 'y' in data_arr.coords
no_xy_dims = 'x' not in data_arr.dims or 'y' not in data_arr.dims
has_proj_vectors = hasattr(area, 'get_proj_vectors')
return not (coords_exist or no_xy_dims or not has_proj_vectors)
def _add_xy_units(crs, x_attrs, y_attrs):
if crs is not None:
units = crs.axis_info[0].unit_name
# fix udunits/CF standard units
units = units.replace('metre', 'meter')
if units == 'degree':
y_attrs['units'] = 'degrees_north'
x_attrs['units'] = 'degrees_east'
else:
y_attrs['units'] = units
x_attrs['units'] = units
def add_xy_coords(data_arr, area, crs=None):
"""Assign x/y coordinates to DataArray from provided area.
If 'x' and 'y' coordinates already exist then they will not be added.
Args:
data_arr (xarray.DataArray): data object to add x/y coordinates to
area (pyresample.geometry.AreaDefinition): area providing the
coordinate data.
crs (pyproj.crs.CRS or None): CRS providing additional information
about the area's coordinate reference system if available.
Requires pyproj 2.0+.
Returns (xarray.DataArray): Updated DataArray object
"""
if not _data_arr_needs_xy_coords(data_arr, area):
return data_arr
x, y = area.get_proj_vectors()
# convert to DataArrays
y_attrs = {}
x_attrs = {}
_add_xy_units(crs, x_attrs, y_attrs)
y = xr.DataArray(y, dims=('y',), attrs=y_attrs)
x = xr.DataArray(x, dims=('x',), attrs=x_attrs)
return data_arr.assign_coords(y=y, x=x)
def _find_and_assign_crs(data_arr, area):
# add CRS object if pyproj 2.0+
try:
from pyproj import CRS
except ImportError:
logger.debug("Could not add 'crs' coordinate with pyproj<2.0")
crs = None
else:
# default lat/lon projection
latlon_proj = "+proj=latlong +datum=WGS84 +ellps=WGS84"
# otherwise get it from the area definition
if hasattr(area, 'crs'):
crs = area.crs
else:
proj_str = getattr(area, 'proj_str', latlon_proj)
crs = CRS.from_string(proj_str)
data_arr = data_arr.assign_coords(crs=crs)
return data_arr, crs
def _update_swath_lonlat_attrs(area):
# add lon/lat arrays for swath definitions
# SwathDefinitions created by Satpy should be assigning DataArray
# objects as the lons/lats attributes so use those directly to
# maintain original .attrs metadata (instead of converting to dask
# array).
lons = area.lons
lats = area.lats
if lons.ndim > 2:
return
if not isinstance(lons, xr.DataArray):
dims = ('y', 'x') if lons.ndim == 2 else ('y',)
lons = xr.DataArray(lons, dims=dims)
lats = xr.DataArray(lats, dims=dims)
lons.attrs.setdefault('standard_name', 'longitude')
lons.attrs.setdefault('long_name', 'longitude')
lons.attrs.setdefault('units', 'degrees_east')
lats.attrs.setdefault('standard_name', 'latitude')
lats.attrs.setdefault('long_name', 'latitude')
lats.attrs.setdefault('units', 'degrees_north')
# See https://github.com/pydata/xarray/issues/3068
# data_arr = data_arr.assign_coords(longitude=lons, latitude=lats)
def add_crs_xy_coords(data_arr, area):
"""Add :class:`pyproj.crs.CRS` and x/y or lons/lats to coordinates.
For SwathDefinition or GridDefinition areas this will add a
`crs` coordinate and coordinates for the 2D arrays of `lons` and `lats`.
For AreaDefinition areas this will add a `crs` coordinate and the
1-dimensional `x` and `y` coordinate variables.
Args:
data_arr (xarray.DataArray): DataArray to add the 'crs'
coordinate.
area (pyresample.geometry.AreaDefinition): Area to get CRS
information from.
"""
data_arr, crs = _find_and_assign_crs(data_arr, area)
# Add x/y coordinates if possible
if isinstance(area, CoordinateDefinition):
_update_swath_lonlat_attrs(area)
else:
# Gridded data (AreaDefinition/StackedAreaDefinition)
data_arr = add_xy_coords(data_arr, area, crs=crs)
return data_arr
def update_resampled_coords(old_data, new_data, new_area):
"""Add coordinate information to newly resampled DataArray.
Args:
old_data (xarray.DataArray): Old data before resampling.
new_data (xarray.DataArray): New data after resampling.
new_area (pyresample.geometry.BaseDefinition): Area definition
for the newly resampled data.
"""
# copy over other non-x/y coordinates
# this *MUST* happen before we set 'crs' below otherwise any 'crs'
# coordinate in the coordinate variables we are copying will overwrite the
# 'crs' coordinate we just assigned to the data
ignore_coords = ('y', 'x', 'crs')
new_coords = {}
for cname, cval in old_data.coords.items():
# we don't want coordinates that depended on the old x/y dimensions
has_ignored_dims = any(dim in cval.dims for dim in ignore_coords)
if cname in ignore_coords or has_ignored_dims:
continue
new_coords[cname] = cval
new_data = new_data.assign_coords(**new_coords)
# add crs, x, and y coordinates
new_data = add_crs_xy_coords(new_data, new_area)
# make sure the new area is assigned to the attributes
new_data.attrs['area'] = new_area
return new_data
class Resampler:
"""Base abstract resampler class."""
def __init__(self,
source_geo_def: Union[CoordinateDefinition, AreaDefinition],
target_geo_def: Union[CoordinateDefinition, AreaDefinition],
cache=None,
):
"""Initialize resampler with geolocation information.
Args:
source_geo_def:
Geolocation definition for the data to be resampled
target_geo_def:
Geolocation definition for the area to resample data to.
cache:
:class:`~pyresample.cache.ResampleCache` instance used by
the resampler to cache.
"""
self.source_geo_def = source_geo_def
self.target_geo_def = target_geo_def
if isinstance(cache, str):
# TODO: convenience for file based caching
raise NotImplementedError()
self.cache = cache
@property
@abc.abstractmethod
def version(self) -> str:
"""Get version number string of the current resampler for hashing and caching.
In cases where a resampler has changed enough that past cached files
are not usable then this version number will be incremented to
distinguish between the cache entries.
"""
def _get_hash(self, **kwargs) -> str:
"""Get hash for the current resampler with the given *kwargs*."""
kwargs.update({
"resampler_version": self.version,
"resampler_name": self.__class__.__name__,
})
return hash_resampler_geometries(self.source_geo_def, self.target_geo_def, **kwargs)
def precompute(self, **kwargs):
"""Do the precomputation.
This is an optional step if the subclass wants to implement more
complex features like caching or can share some calculations
between multiple datasets to be processed.
"""
return None
def resample(self, data):
"""Resample input ``data`` from the source geometry to the target geometry.
Args:
data (ArrayLike): Data to be resampled
Returns (ArrayLike): Data resampled to the target area
"""
self.precompute()
raise NotImplementedError("This method must be implemented by a subclass.")
| lgpl-3.0 |
schets/scikit-learn | sklearn/externals/joblib/__init__.py | 35 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
herilalaina/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 38 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases while the performance
on the test set is optimal within a range of values of the regularization
parameter. The example uses an Elastic-Net regression model, and performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
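# Note (added): "explained variance a.k.a. R^2" above refers to the coefficient
# of determination returned by ``ElasticNet.score``,
#
#     R^2 = 1 - sum((y - y_pred)**2) / sum((y - mean(y))**2),
#
# so 1.0 is a perfect fit and 0.0 is no better than predicting the mean of y.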
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
# #############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
# #############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
# #############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter5/fig_model_comparison_mcmc.py | 4 | 8103 | """
MCMC Model Comparison
---------------------
Figure 5.24
The top-right panel shows the posterior pdf for mu and sigma for a single
Gaussian fit to the data shown in figure 5.23. The remaining panels show the
projections of the five-dimensional pdf for a Gaussian mixture model with
two components. Contours are based on a 10,000 point MCMC chain.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.special import gamma
from scipy.stats import norm
from sklearn.neighbors import BallTree
from astroML.density_estimation import GaussianMixture1D
from astroML.plotting import plot_mcmc
# hack to fix an import issue in older versions of pymc
import scipy
scipy.derivative = scipy.misc.derivative
import pymc
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def get_logp(S, model):
"""compute log(p) given a pyMC model"""
M = pymc.MAP(model)
traces = np.array([S.trace(s)[:] for s in S.stochastics])
logp = np.zeros(traces.shape[1])
for i in range(len(logp)):
logp[i] = -M.func(traces[:, i])
return logp
def estimate_bayes_factor(traces, logp, r=0.05, return_list=False):
"""Estimate the bayes factor using the local density of points"""
D, N = traces.shape
# compute volume of a D-dimensional sphere of radius r
Vr = np.pi ** (0.5 * D) / gamma(0.5 * D + 1) * (r ** D)
# use neighbor count within r as a density estimator
bt = BallTree(traces.T)
count = bt.query_radius(traces.T, r=r, count_only=True)
BF = logp + np.log(N) + np.log(Vr) - np.log(count)
if return_list:
return BF
else:
p25, p50, p75 = np.percentile(BF, [25, 50, 75])
return p50, 0.7413 * (p75 - p25)
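# Derivation note (added, not in the original source): the local density of
# chain samples approximates the normalized posterior,
#
#     p(theta_i | D) ~= count_i / (N * V_r),
#
# and since the normalized posterior equals exp(logp_i) / Z (with logp the log
# of likelihood times prior), the evidence estimate at each sample is
#
#     log Z_i = logp_i + log(N) + log(V_r) - log(count_i),
#
# which is the expression assigned to BF above; the function reports its median
# and a robust, IQR-based spread (0.7413 * IQR ~= sigma for a Gaussian).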
#------------------------------------------------------------
# Generate the data
mu1_in = 0
sigma1_in = 0.3
mu2_in = 1
sigma2_in = 1
ratio_in = 1.5
N = 200
np.random.seed(10)
gm = GaussianMixture1D([mu1_in, mu2_in],
[sigma1_in, sigma2_in],
[ratio_in, 1])
x_sample = gm.sample(N)
#------------------------------------------------------------
# Set up pyMC model: single gaussian
# 2 parameters: (mu, sigma)
M1_mu = pymc.Uniform('M1_mu', -5, 5, value=0)
M1_log_sigma = pymc.Uniform('M1_log_sigma', -10, 10, value=0)
@pymc.deterministic
def M1_sigma(M1_log_sigma=M1_log_sigma):
return np.exp(M1_log_sigma)
@pymc.deterministic
def M1_tau(M1_sigma=M1_sigma):
return 1. / M1_sigma ** 2
M1 = pymc.Normal('M1', M1_mu, M1_tau, observed=True, value=x_sample)
model1 = dict(M1_mu=M1_mu, M1_log_sigma=M1_log_sigma,
M1_sigma=M1_sigma,
M1_tau=M1_tau, M1=M1)
#------------------------------------------------------------
# Set up pyMC model: double gaussian
# 5 parameters: (mu1, mu2, sigma1, sigma2, ratio)
def doublegauss_like(x, mu1, mu2, sigma1, sigma2, ratio):
"""log-likelihood for double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
L = r1 * norm(mu1, sigma1).pdf(x) + r2 * norm(mu2, sigma2).pdf(x)
L[L == 0] = 1E-16 # prevent divide-by-zero error
logL = np.log(L).sum()
if np.isinf(logL):
raise pymc.ZeroProbability
else:
return logL
def rdoublegauss(mu1, mu2, sigma1, sigma2, ratio, size=None):
"""random variable from double gaussian"""
r1 = ratio / (1. + ratio)
r2 = 1 - r1
R = np.asarray(np.random.random(size))
Rshape = R.shape
R = np.atleast_1d(R)
mask1 = (R < r1)
mask2 = ~mask1
N1 = mask1.sum()
N2 = R.size - N1
R[mask1] = norm(mu1, sigma1).rvs(N1)
R[mask2] = norm(mu2, sigma2).rvs(N2)
return R.reshape(Rshape)
DoubleGauss = pymc.stochastic_from_dist('doublegauss',
logp=doublegauss_like,
random=rdoublegauss,
dtype=np.float,
mv=True)
# set up our Stochastic variables, mu1, mu2, sigma1, sigma2, ratio
M2_mu1 = pymc.Uniform('M2_mu1', -5, 5, value=0)
M2_mu2 = pymc.Uniform('M2_mu2', -5, 5, value=1)
M2_log_sigma1 = pymc.Uniform('M2_log_sigma1', -10, 10, value=0)
M2_log_sigma2 = pymc.Uniform('M2_log_sigma2', -10, 10, value=0)
@pymc.deterministic
def M2_sigma1(M2_log_sigma1=M2_log_sigma1):
return np.exp(M2_log_sigma1)
@pymc.deterministic
def M2_sigma2(M2_log_sigma2=M2_log_sigma2):
return np.exp(M2_log_sigma2)
M2_ratio = pymc.Uniform('M2_ratio', 1E-3, 1E3, value=1)
M2 = DoubleGauss('M2', M2_mu1, M2_mu2, M2_sigma1, M2_sigma2, M2_ratio,
observed=True, value=x_sample)
model2 = dict(M2_mu1=M2_mu1, M2_mu2=M2_mu2,
M2_log_sigma1=M2_log_sigma1, M2_log_sigma2=M2_log_sigma2,
M2_sigma1=M2_sigma1, M2_sigma2=M2_sigma2,
M2_ratio=M2_ratio, M2=M2)
#------------------------------------------------------------
# Set up MCMC sampling
def compute_MCMC_models(Niter=10000, burn=1000, rseed=0):
pymc.numpy.random.seed(rseed)
S1 = pymc.MCMC(model1)
S1.sample(iter=Niter, burn=burn)
trace1 = np.vstack([S1.trace('M1_mu')[:],
S1.trace('M1_sigma')[:]])
logp1 = get_logp(S1, model1)
S2 = pymc.MCMC(model2)
S2.sample(iter=Niter, burn=burn)
trace2 = np.vstack([S2.trace('M2_mu1')[:],
S2.trace('M2_mu2')[:],
S2.trace('M2_sigma1')[:],
S2.trace('M2_sigma2')[:],
S2.trace('M2_ratio')[:]])
logp2 = get_logp(S2, model2)
return trace1, logp1, trace2, logp2
trace1, logp1, trace2, logp2 = compute_MCMC_models()
#------------------------------------------------------------
# Compute Odds ratio with density estimation technique
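# Note (added): BF1 and BF2 are per-model log-evidence estimates (single
# Gaussian vs. two-component mixture), so with equal prior probability on the
# two models the odds ratio in favour of the mixture is approximately
#
#     O_21 = Z_2 / Z_1 ~= exp(BF2 - BF1)
#
# with an uncertainty driven by dBF1 and dBF2.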
BF1, dBF1 = estimate_bayes_factor(trace1, logp1, r=0.02)
BF2, dBF2 = estimate_bayes_factor(trace2, logp2, r=0.05)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
labels = [r'$\mu_1$',
r'$\mu_2$',
r'$\sigma_1$',
r'$\sigma_2$',
r'${\rm ratio}$']
true_values = [mu1_in,
mu2_in,
sigma1_in,
sigma2_in,
ratio_in]
limits = [(-0.24, 0.12),
(0.55, 1.75),
(0.15, 0.45),
(0.55, 1.3),
(0.25, 2.1)]
# we assume mu1 < mu2, but the results may be switched
# due to the symmetry of the problem. If so, switch back
if np.median(trace2[0]) > np.median(trace2[1]):
trace2 = trace2[[1, 0, 3, 2, 4], :]
# ``N2`` is never defined in this script, so these leftover assignments would
# raise a NameError; their results are unused, so they are kept commented out.
# N2_norm_mu = N2.mu[N2.M2_mu2, N2.M2_mu1,
#                    N2.M2_sigma2, N2.M2_sigma1, N2.M2_ratio]
# N2_norm_Sig = N2.C[N2.M2_mu2, N2.M2_mu1,
#                    N2.M2_sigma2, N2.M2_sigma1, N2.M2_ratio]
# Plot the simple 2-component model
ax, = plot_mcmc(trace1, fig=fig, bounds=[0.6, 0.6, 0.95, 0.95],
limits=[(0.3, 0.8), (0.75, 1.15)],
labels=[r'$\mu$', r'$\sigma$'], colors='k')
ax.text(0.05, 0.95, "Single Gaussian fit", va='top', ha='left',
transform=ax.transAxes)
# Plot the 5-component model
ax_list = plot_mcmc(trace2, limits=limits, labels=labels,
true_values=true_values, fig=fig,
bounds=(0.12, 0.12, 0.95, 0.95),
colors='k')
for ax in ax_list:
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_locator(plt.MaxNLocator(4))
plt.show()
| bsd-2-clause |
oldzhu/linux | tools/power/pm-graph/analyze_suspend.py | 55 | 176452 | #!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Links:
# Home Page
# https://01.org/suspendresume
# Source repo
# https://github.com/01org/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct
import ConfigParser
from threading import Thread
from subprocess import call, Popen, PIPE
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '4.6'
ansi = False
verbose = False
addlogs = False
mindevlen = 0.0
mincglen = 0.0
cgphase = ''
cgtest = -1
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
srgap = 0
cgexp = False
outdir = ''
testdir = '.'
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
traceevents = [
'suspend_resume',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
suspendmode = 'mem'
hostname = 'localhost'
prefix = 'test'
teststamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = ''
embedded = False
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
stamp = 0
execcount = 1
x2delay = 0
usecallgraph = False
usetraceevents = False
usetraceeventsonly = False
usetracemarkers = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
mixedphaseheight = True
devprops = dict()
predelay = 0
postdelay = 0
procexecfmt = 'ps - (?P<ps>.*)$'
devpropfmt = '# Device Properties: .*'
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
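# Note (added): stampfmt above matches the header line this tool writes at the
# top of its dmesg/ftrace logs; a made-up example of a matching line is
#
#     # suspend-052317-173200 myhost mem 4.11.0-rc7
#
# i.e. date, time, hostname, suspend mode and kernel version, which is how a
# previously captured log is re-associated with its test parameters.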
tracefuncs = {
'sys_sync': dict(),
'pm_prepare_console': dict(),
'pm_notifier_call_chain': dict(),
'freeze_processes': dict(),
'freeze_kernel_threads': dict(),
'pm_restrict_gfp_mask': dict(),
'acpi_suspend_begin': dict(),
'suspend_console': dict(),
'acpi_pm_prepare': dict(),
'syscore_suspend': dict(),
'arch_enable_nonboot_cpus_end': dict(),
'syscore_resume': dict(),
'acpi_pm_finish': dict(),
'resume_console': dict(),
'acpi_pm_end': dict(),
'pm_restore_gfp_mask': dict(),
'thaw_processes': dict(),
'pm_restore_console': dict(),
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout_uninterruptible': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
# ACPI
'acpi_resume_power_resources': dict(),
'acpi_ps_parse_aml': dict(),
# filesystem
'ext4_sync_fs': dict(),
# 80211
'iwlagn_mac_start': dict(),
'iwlagn_alloc_bcast_station': dict(),
'iwl_trans_pcie_start_hw': dict(),
'iwl_trans_pcie_start_fw': dict(),
'iwl_run_init_ucode': dict(),
'iwl_load_ucode_wait_alive': dict(),
'iwl_alive_start': dict(),
'iwlagn_mac_stop': dict(),
'iwlagn_mac_suspend': dict(),
'iwlagn_mac_resume': dict(),
'iwlagn_mac_add_interface': dict(),
'iwlagn_mac_remove_interface': dict(),
'iwlagn_mac_change_interface': dict(),
'iwlagn_mac_config': dict(),
'iwlagn_configure_filter': dict(),
'iwlagn_mac_hw_scan': dict(),
'iwlagn_bss_info_changed': dict(),
'iwlagn_mac_channel_switch': dict(),
'iwlagn_mac_flush': dict(),
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': dict(),
'i915_restore_state': dict(),
'intel_opregion_setup': dict(),
'g4x_pre_enable_dp': dict(),
'vlv_pre_enable_dp': dict(),
'chv_pre_enable_dp': dict(),
'g4x_enable_dp': dict(),
'vlv_enable_dp': dict(),
'intel_hpd_init': dict(),
'intel_opregion_register': dict(),
'intel_dp_detect': dict(),
'intel_hdmi_detect': dict(),
'intel_opregion_init': dict(),
'intel_fbdev_set_suspend': dict(),
}
kprobes = dict()
timeformat = '%.3f'
def __init__(self):
# if this is a phoronix test run, set some default options
if('LOG_FILE' in os.environ and 'TEST_RESULTS_IDENTIFIER' in os.environ):
self.embedded = True
self.addlogs = True
self.htmlfile = os.environ['LOG_FILE']
self.archargs = 'args_'+platform.machine()
self.hostname = platform.node()
if(self.hostname == ''):
self.hostname = 'localhost'
rtc = "rtc0"
if os.path.exists('/dev/rtc'):
rtc = os.readlink('/dev/rtc')
rtc = '/sys/class/rtc/'+rtc
if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
self.rtcpath = rtc
if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
self.ansi = True
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
doError('This command must be run as root')
return False
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = self.hostname
self.outdir = value.format(**args)
def setOutputFile(self):
if((self.htmlfile == '') and (self.dmesgfile != '')):
m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if((self.htmlfile == '') and (self.ftracefile != '')):
m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
if(self.htmlfile == ''):
self.htmlfile = 'output.html'
def initTestOutput(self, subdir, testpath=''):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
kver = string.split(v)[2]
n = datetime.now()
testtime = n.strftime('suspend-%m%d%y-%H%M%S')
if not testpath:
testpath = n.strftime('suspend-%y%m%d-%H%M%S')
if(subdir != "."):
self.testdir = subdir+"/"+testpath
else:
self.testdir = testpath
self.teststamp = \
'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
if(self.embedded):
self.dmesgfile = \
'/tmp/'+testtime+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
'/tmp/'+testtime+'_'+self.suspendmode+'_ftrace.txt'
return
self.dmesgfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
self.ftracefile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
self.htmlfile = \
self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
def setDeviceFilter(self, value):
self.devicefilter = []
if value:
value = value.split(',')
for i in value:
self.devicefilter.append(i.strip())
def rtcWakeAlarmOn(self):
call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
outD = open(self.rtcpath+'/date', 'r').read().strip()
outT = open(self.rtcpath+'/time', 'r').read().strip()
mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
if(mD and mT):
# get the current time from hardware
utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
dt = datetime(\
int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
nowtime = int(dt.strftime('%s')) + utcoffset
else:
# if hardware time fails, use the software time
nowtime = int(datetime.now().strftime('%s'))
alarm = nowtime + self.rtcwaketime
call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
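	# For example, with rtcwaketime set to 15 the alarm is armed to fire 15
	# seconds after the RTC's current time (or after the system clock if the
	# hardware date/time files could not be parsed).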
def rtcWakeAlarmOff(self):
call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
def initdmesg(self):
# get the latest time stamp from the dmesg log
fp = Popen('dmesg', stdout=PIPE).stdout
ktime = '0'
for line in fp:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
fp.close()
self.dmesgstart = float(ktime)
def getdmesg(self):
# store all new dmesg lines since initdmesg was called
fp = Popen('dmesg', stdout=PIPE).stdout
op = open(self.dmesgfile, 'a')
for line in fp:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if ktime > self.dmesgstart:
op.write(line)
fp.close()
op.close()
def addFtraceFilterFunctions(self, file):
fp = open(file)
list = fp.read().split('\n')
fp.close()
for i in list:
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
def getFtraceFilterFunctions(self, current):
rootCheck(True)
if not current:
call('cat '+self.tpath+'available_filter_functions', shell=True)
return
fp = open(self.tpath+'available_filter_functions')
master = fp.read().split('\n')
fp.close()
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
i = self.tracefuncs[i]['func']
if i in master:
print i
else:
print self.colorText(i)
def setFtraceFilterFunctions(self, list):
fp = open(self.tpath+'available_filter_functions')
master = fp.read().split('\n')
fp.close()
flist = ''
for i in list:
if i not in master:
continue
if ' [' in i:
flist += i.split(' ')[0]+'\n'
else:
flist += i+'\n'
fp = open(self.tpath+'set_graph_function', 'w')
fp.write(flist)
fp.close()
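	# Note: module-qualified entries in available_filter_functions appear as
	# 'funcname [modname]'; only the bare function name is written to
	# set_graph_function, which is what the split above strips out.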
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
def defaultKprobe(self, name, kdata):
k = kdata
for field in ['name', 'format', 'func']:
if field not in k:
k[field] = name
if self.archargs in k:
k['args'] = k[self.archargs]
else:
k['args'] = dict()
k['format'] = name
self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
def kprobeDisplayName(self, name, dataraw):
if name not in self.kprobes:
self.basicKprobe(name)
data = ''
quote=0
		# first remove any spaces inside quotes, and the quotes
for c in dataraw:
if c == '"':
quote = (quote + 1) % 2
if quote and c == ' ':
data += '_'
elif c != '"':
data += c
fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
arglist = dict()
# now process the args
for arg in sorted(args):
arglist[arg] = ''
			m = re.match('.* '+arg+'=(?P<arg>.*) ', data)
if m:
arglist[arg] = m.group('arg')
else:
				m = re.match('.* '+arg+'=(?P<arg>.*)', data)
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
out = out.replace(' ', '_').replace('"', '')
return out
def kprobeText(self, kname, kprobe):
name = fmt = func = kname
args = dict()
if 'name' in kprobe:
name = kprobe['name']
if 'format' in kprobe:
fmt = kprobe['format']
if 'func' in kprobe:
func = kprobe['func']
if self.archargs in kprobe:
args = kprobe[self.archargs]
if 'args' in kprobe:
args = kprobe['args']
if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
val = 'p:%s_cal %s' % (name, func)
for i in sorted(args):
val += ' %s=%s' % (i, args[i])
val += '\nr:%s_ret %s $retval\n' % (name, func)
return val
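	# Illustrative example (probe and argument are hypothetical, not taken from
	# the tables above):
	#   kprobeText('msleep', {'func': 'msleep', 'args': {'time': '%di:s32'}})
	# returns two probe definitions, roughly:
	#   p:msleep_cal msleep time=%di:s32
	#   r:msleep_ret msleep $retval
	# i.e. an entry probe suffixed _cal and a return probe suffixed _ret, which
	# is the naming the ftrace parser later keys on.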
def addKprobes(self, output=False):
if len(sysvals.kprobes) < 1:
return
if output:
print(' kprobe functions in this kernel:')
# first test each kprobe
rejects = []
# sort kprobes: trace, ub-dev, custom, dev
kpl = [[], [], [], []]
for name in sorted(self.kprobes):
res = self.colorText('YES', 32)
if not self.testKprobe(name, self.kprobes[name]):
res = self.colorText('NO')
rejects.append(name)
else:
if name in self.tracefuncs:
kpl[0].append(name)
elif name in self.dev_tracefuncs:
if 'ub' in self.dev_tracefuncs[name]:
kpl[1].append(name)
else:
kpl[3].append(name)
else:
kpl[2].append(name)
if output:
print(' %s: %s' % (name, res))
kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
# remove all failed ones from the list
for name in rejects:
self.kprobes.pop(name)
# set the kprobes all at once
self.fsetVal('', 'kprobe_events')
kprobeevents = ''
for kp in kplist:
kprobeevents += self.kprobeText(kp, self.kprobes[kp])
self.fsetVal(kprobeevents, 'kprobe_events')
# verify that the kprobes were set as ordered
check = self.fgetVal('kprobe_events')
linesout = len(kprobeevents.split('\n')) - 1
linesack = len(check.split('\n')) - 1
if output:
res = '%d/%d' % (linesack, linesout)
if linesack < linesout:
res = self.colorText(res, 31)
else:
res = self.colorText(res, 32)
print(' working kprobe functions enabled: %s' % res)
self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
kprobeevents = self.kprobeText(kname, kprobe)
if not kprobeevents:
return False
try:
self.fsetVal(kprobeevents, 'kprobe_events')
check = self.fgetVal('kprobe_events')
except:
return False
linesout = len(kprobeevents.split('\n'))
linesack = len(check.split('\n'))
if linesack < linesout:
return False
return True
def fsetVal(self, val, path, mode='w'):
file = self.tpath+path
if not os.path.exists(file):
return False
try:
fp = open(file, mode, 0)
fp.write(val)
fp.flush()
fp.close()
except:
pass
return True
def fgetVal(self, path):
file = self.tpath+path
res = ''
if not os.path.exists(file):
return res
try:
fp = open(file, 'r')
res = fp.read()
fp.close()
except:
pass
return res
def cleanupFtrace(self):
if(self.usecallgraph or self.usetraceevents):
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
def setupAllKprobes(self):
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
def isCallgraphFunc(self, name):
if len(self.tracefuncs) < 1 and self.suspendmode == 'command':
return True
for i in self.tracefuncs:
if 'func' in self.tracefuncs[i]:
f = self.tracefuncs[i]['func']
else:
f = i
if name == f:
return True
return False
def initFtrace(self, testing=False):
print('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
# set trace buffer to a huge value
self.fsetVal('nop', 'current_tracer')
self.fsetVal('131073', 'buffer_size_kb')
# go no further if this is just a status check
if testing:
return
# initialize the callgraph trace
if(self.usecallgraph):
# set trace type
self.fsetVal('function_graph', 'current_tracer')
self.fsetVal('', 'set_ftrace_filter')
# set trace format options
self.fsetVal('print-parent', 'trace_options')
self.fsetVal('funcgraph-abstime', 'trace_options')
self.fsetVal('funcgraph-cpu', 'trace_options')
self.fsetVal('funcgraph-duration', 'trace_options')
self.fsetVal('funcgraph-proc', 'trace_options')
self.fsetVal('funcgraph-tail', 'trace_options')
self.fsetVal('nofuncgraph-overhead', 'trace_options')
self.fsetVal('context-info', 'trace_options')
self.fsetVal('graph-time', 'trace_options')
self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
cf = ['dpm_run_callback']
if(self.usetraceeventsonly):
cf += ['dpm_prepare', 'dpm_complete']
for fn in self.tracefuncs:
if 'func' in self.tracefuncs[fn]:
cf.append(self.tracefuncs[fn]['func'])
else:
cf.append(fn)
self.setFtraceFilterFunctions(cf)
# initialize the kprobe trace
elif self.usekprobes:
for name in self.tracefuncs:
self.defaultKprobe(name, self.tracefuncs[name])
if self.usedevsrc:
for name in self.dev_tracefuncs:
self.defaultKprobe(name, self.dev_tracefuncs[name])
print('INITIALIZING KPROBES...')
self.addKprobes(self.verbose)
if(self.usetraceevents):
# turn trace events on
events = iter(self.traceevents)
for e in events:
self.fsetVal('1', 'events/power/'+e+'/enable')
# clear the trace buffer
self.fsetVal('', 'trace')
def verifyFtrace(self):
# files needed for any trace data
files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
'trace_marker', 'trace_options', 'tracing_on']
# files needed for callgraph trace data
tp = self.tpath
if(self.usecallgraph):
files += [
'available_filter_functions',
'set_ftrace_filter',
'set_graph_function'
]
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def verifyKprobes(self):
# files needed for kprobes to work
files = ['kprobe_events', 'events']
tp = self.tpath
for f in files:
if(os.path.exists(tp+f) == False):
return False
return True
def colorText(self, str, color=31):
if not self.ansi:
return str
return '\x1B[%d;40m%s\x1B[m' % (color, str)
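# Example (illustrative): SystemValues.colorText('NO') returns
# '\x1B[31;40mNO\x1B[m' (red text on a black background) when stdout is a tty,
# and the unmodified string otherwise.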
sysvals = SystemValues()
suspendmodename = {
'freeze': 'Freeze (S0)',
'standby': 'Standby (S1)',
'mem': 'Suspend (S3)',
'disk': 'Hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
class DevProps:
syspath = ''
altname = ''
async = True
xtraclass = ''
xtrainfo = ''
def out(self, dev):
return '%s,%s,%d;' % (dev, self.altname, self.async)
def debug(self, dev):
print '%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.async)
def altName(self, dev):
if not self.altname or self.altname == dev:
return dev
return '%s [%s]' % (self.altname, dev)
def xtraClass(self):
if self.xtraclass:
return ' '+self.xtraclass
if not self.async:
return ' sync'
return ''
def xtraInfo(self):
if self.xtraclass:
return ' '+self.xtraclass
if self.async:
return ' async_device'
return ' sync_device'
# Class: DeviceNode
# Description:
#	 A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
name = ''
children = 0
depth = 0
def __init__(self, nodename, nodedepth):
self.name = nodename
self.children = []
self.depth = nodedepth
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
#	 each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
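# A minimal sketch of a single device entry as stored in the hierarchy above
# (names and numbers are purely illustrative):
#	 dmesg['resume']['list']['usb1'] = {
#		 'name': 'usb1', 'start': 12.345, 'end': 12.400, 'length': 0.055,
#		 'pid': 42, 'par': 'pci0000:00', 'row': 0, 'id': 'a5', 'drv': 'xhci_hcd'
#	 }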
class Data:
dmesg = {} # root data structure
phases = [] # ordered list of phases
start = 0.0 # test start
end = 0.0 # test end
tSuspended = 0.0 # low-level suspend start
tResumed = 0.0 # low-level resume start
tKernSus = 0.0 # kernel level suspend start
tKernRes = 0.0 # kernel level resume end
tLow = 0.0 # time spent in low-level suspend (standby/freeze)
fwValid = False # is firmware data available
fwSuspend = 0 # time spent in firmware suspend
fwResume = 0 # time spent in firmware resume
dmesgtext = [] # dmesg text file in memory
pstl = 0 # process timeline
testnumber = 0
idstr = ''
html_device_id = 0
stamp = 0
outfile = ''
devpids = []
kerror = False
def __init__(self, num):
idchar = 'abcdefghij'
self.pstl = dict()
self.testnumber = num
self.idstr = idchar[num]
self.dmesgtext = []
self.phases = []
self.dmesg = { # fixed list of 10 phases
'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#CCFFCC', 'order': 0},
'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#88FF88', 'order': 1},
'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#00AA00', 'order': 2},
'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#008888', 'order': 3},
'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#0000FF', 'order': 4},
'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF0000', 'order': 5},
'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FF9900', 'order': 6},
'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFCC00', 'order': 7},
'resume': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFF88', 'order': 8},
'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0,
'row': 0, 'color': '#FFFFCC', 'order': 9}
}
self.phases = self.sortedPhases()
self.devicegroups = []
for phase in self.phases:
self.devicegroups.append([phase])
self.errorinfo = {'suspend':[],'resume':[]}
def extractErrorInfo(self, dmesg):
error = ''
tm = 0.0
for i in range(len(dmesg)):
if 'Call Trace:' in dmesg[i]:
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) .*', dmesg[i])
if not m:
continue
tm = float(m.group('ktime'))
if tm < self.start or tm > self.end:
continue
for j in range(i-10, i+1):
error += dmesg[j]
continue
if error:
m = re.match('[ \t]*\[ *[0-9\.]*\] \[\<[0-9a-fA-F]*\>\] .*', dmesg[i])
if m:
error += dmesg[i]
else:
if tm < self.tSuspended:
dir = 'suspend'
else:
dir = 'resume'
error = error.replace('<', '<').replace('>', '>')
vprint('kernel error found in %s at %f' % (dir, tm))
self.errorinfo[dir].append((tm, error))
self.kerror = True
error = ''
def setStart(self, time):
self.start = time
def setEnd(self, time):
self.end = time
def isTraceEventOutsideDeviceCalls(self, pid, time):
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
d = list[dev]
if(d['pid'] == pid and time >= d['start'] and
time < d['end']):
return False
return True
def sourcePhase(self, start):
for phase in self.phases:
pend = self.dmesg[phase]['end']
if start <= pend:
return phase
return 'resume_complete'
def sourceDevice(self, phaselist, start, end, pid, type):
tgtdev = ''
for phase in phaselist:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
# pid must match
if dev['pid'] != pid:
continue
devS = dev['start']
devE = dev['end']
if type == 'device':
# device target event is entirely inside the source boundary
if(start < devS or start >= devE or end <= devS or end > devE):
continue
elif type == 'thread':
# thread target event will expand the source boundary
if start < devS:
dev['start'] = start
if end > devE:
dev['end'] = end
tgtdev = dev
break
return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
# try to place the call in a device
tgtdev = self.sourceDevice(self.phases, start, end, pid, 'device')
# calls with device pids that occur outside device bounds are dropped
# TODO: include these somehow
if not tgtdev and pid in self.devpids:
return False
# try to place the call in a thread
if not tgtdev:
tgtdev = self.sourceDevice(self.phases, start, end, pid, 'thread')
# create new thread blocks, expand as new calls are found
if not tgtdev:
if proc == '<...>':
threadname = 'kthread-%d' % (pid)
else:
threadname = '%s-%d' % (proc, pid)
tgtphase = self.sourcePhase(start)
self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
# this should not happen
if not tgtdev:
vprint('[%f - %f] %s-%d %s %s %s' % \
(start, end, proc, pid, kprobename, cdata, rdata))
return False
# place the call data inside the src element of the tgtdev
if('src' not in tgtdev):
tgtdev['src'] = []
dtf = sysvals.dev_tracefuncs
ubiquitous = False
if kprobename in dtf and 'ub' in dtf[kprobename]:
ubiquitous = True
title = cdata+' '+rdata
mstr = '\(.*\) *(?P<args>.*) *\((?P<caller>.*)\+.* arg1=(?P<ret>.*)'
m = re.match(mstr, title)
if m:
c = m.group('caller')
a = m.group('args').strip()
r = m.group('ret')
if len(r) > 6:
r = ''
else:
r = 'ret=%s ' % r
if ubiquitous and c in dtf and 'ub' in dtf[c]:
return False
color = sysvals.kprobeColor(kprobename)
e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
tgtdev['src'].append(e)
return True
def overflowDevices(self):
# get a list of devices that extend beyond the end of this test run
devlist = []
for phase in self.phases:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if dev['end'] > self.end:
devlist.append(dev)
return devlist
def mergeOverlapDevices(self, devlist):
# merge any devices that overlap devlist
for dev in devlist:
devname = dev['name']
for phase in self.phases:
list = self.dmesg[phase]['list']
if devname not in list:
continue
tdev = list[devname]
o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
if o <= 0:
continue
dev['end'] = tdev['end']
if 'src' not in dev or 'src' not in tdev:
continue
dev['src'] += tdev['src']
del list[devname]
def usurpTouchingThread(self, name, dev):
		# the calling test's thread takes priority; merge this test's matching thread into it
for phase in self.phases:
list = self.dmesg[phase]['list']
if name in list:
tdev = list[name]
if tdev['start'] - dev['end'] < 0.1:
dev['end'] = tdev['end']
if 'src' not in dev:
dev['src'] = []
if 'src' in tdev:
dev['src'] += tdev['src']
del list[name]
break
def stitchTouchingThreads(self, testlist):
# merge any threads between tests that touch
for phase in self.phases:
list = self.dmesg[phase]['list']
for devname in list:
dev = list[devname]
if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
continue
for data in testlist:
data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
# merge any src call loops to reduce timeline size
for phase in self.phases:
list = self.dmesg[phase]['list']
for dev in list:
if 'src' not in list[dev]:
continue
src = list[dev]['src']
p = 0
for e in sorted(src, key=lambda event: event.time):
if not p or not e.repeat(p):
p = e
continue
# e is another iteration of p, move it into p
p.end = e.end
p.length = p.end - p.time
p.count += 1
src.remove(e)
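	# For example, a run of back-to-back identical calls (same name, args,
	# caller, process and return value, within the configured gap/length
	# limits) collapses into a single DevFunction entry whose end time spans
	# the whole run and whose count records how many iterations were merged.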
def trimTimeVal(self, t, t0, dT, left):
if left:
if(t > t0):
if(t - dT < t0):
return t0
return t - dT
else:
return t
else:
if(t < t0 + dT):
if(t > t0):
return t0 + dT
return t + dT
else:
return t
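	# Worked example (times are illustrative): with t0=100.0 and dT=2.0,
	# trimming to the left maps 103.5 -> 101.5 and 100.5 -> 100.0 while 99.0
	# is left untouched, i.e. everything after the removed window slides back
	# by dT, clamped so nothing crosses t0.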
def trimTime(self, t0, dT, left):
self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
self.start = self.trimTimeVal(self.start, t0, dT, left)
self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
self.end = self.trimTimeVal(self.end, t0, dT, left)
for phase in self.phases:
p = self.dmesg[phase]
p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
list = p['list']
for name in list:
d = list[name]
d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
if('ftrace' in d):
cg = d['ftrace']
cg.start = self.trimTimeVal(cg.start, t0, dT, left)
cg.end = self.trimTimeVal(cg.end, t0, dT, left)
for line in cg.list:
line.time = self.trimTimeVal(line.time, t0, dT, left)
if('src' in d):
for e in d['src']:
e.time = self.trimTimeVal(e.time, t0, dT, left)
def normalizeTime(self, tZero):
# trim out any standby or freeze clock time
if(self.tSuspended != self.tResumed):
if(self.tResumed > tZero):
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, True)
else:
self.trimTime(self.tSuspended, \
self.tResumed-self.tSuspended, False)
def setPhase(self, phase, ktime, isbegin):
if(isbegin):
self.dmesg[phase]['start'] = ktime
else:
self.dmesg[phase]['end'] = ktime
def dmesgSortVal(self, phase):
return self.dmesg[phase]['order']
def sortedPhases(self):
return sorted(self.dmesg, key=self.dmesgSortVal)
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
slist = []
tmp = dict()
for devname in list:
dev = list[devname]
if dev['length'] == 0:
continue
tmp[dev['start']] = devname
for t in sorted(tmp):
slist.append(tmp[t])
return slist
def fixupInitcalls(self, phase):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
for p in self.phases:
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
				vprint('%s (%s): callback did not return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.phases:
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.phases:
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
newgroup = []
for group in self.devicegroups:
for phase in phases:
if phase not in group:
continue
for p in group:
if p not in newgroup:
newgroup.append(p)
if group not in rmgroups:
rmgroups.append(group)
for group in rmgroups:
self.devicegroups.remove(group)
self.devicegroups.append(newgroup)
def newActionGlobal(self, name, start, end, pid=-1, color=''):
# which phase is this device callback or action in
targetphase = 'none'
htmlclass = ''
overlap = 0.0
phases = []
for phase in self.phases:
pstart = self.dmesg[phase]['start']
pend = self.dmesg[phase]['end']
# see if the action overlaps this phase
o = max(0, min(end, pend) - max(start, pstart))
if o > 0:
phases.append(phase)
# set the target phase to the one that overlaps most
if o > overlap:
if overlap > 0 and phase == 'post_resume':
continue
targetphase = phase
overlap = o
# if no target phase was found, pin it to the edge
if targetphase == 'none':
p0start = self.dmesg[self.phases[0]]['start']
if start <= p0start:
targetphase = self.phases[0]
else:
targetphase = self.phases[-1]
if pid == -2:
htmlclass = ' bg'
elif pid == -3:
htmlclass = ' ps'
if len(phases) > 1:
htmlclass = ' bg'
self.phaseOverlap(phases)
if targetphase in self.phases:
newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
return (targetphase, newname)
return False
def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
if pid == -2:
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
if htmlclass:
list[name]['htmlclass'] = htmlclass
if color:
list[name]['color'] = color
return name
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def printDetails(self):
vprint('Timeline Details:')
vprint(' test start: %f' % self.start)
vprint('kernel suspend start: %f' % self.tKernSus)
for phase in self.phases:
dc = len(self.dmesg[phase]['list'])
vprint(' %16s: %f - %f (%d devices)' % (phase, \
self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc))
vprint(' kernel resume end: %f' % self.tKernRes)
vprint(' test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.phases:
list = self.deviceChildren(devname, phase)
for dev in list:
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.phases:
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.dmesg:
list = self.dmesg[phase]['list']
for dev in list:
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if width != '0.000000' and length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
def addProcessUsageEvent(self, name, times):
# get the start and end times for this process
maxC = 0
tlast = 0
start = -1
end = -1
for t in sorted(times):
if tlast == 0:
tlast = t
continue
if name in self.pstl[t]:
if start == -1 or tlast < start:
start = tlast
if end == -1 or t > end:
end = t
tlast = t
if start == -1 or end == -1:
return 0
# add a new action for this process and get the object
out = self.newActionGlobal(name, start, end, -3)
if not out:
return 0
phase, devname = out
dev = self.dmesg[phase]['list'][devname]
# get the cpu exec data
tlast = 0
clast = 0
cpuexec = dict()
for t in sorted(times):
if tlast == 0 or t <= start or t > end:
tlast = t
continue
list = self.pstl[t]
c = 0
if name in list:
c = list[name]
if c > maxC:
maxC = c
if c != clast:
key = (tlast, t)
cpuexec[key] = c
tlast = t
clast = c
dev['cpuexec'] = cpuexec
return maxC
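	# The resulting dev['cpuexec'] maps (interval start, interval end) tuples
	# to the jiffy count sampled for this process in that interval, e.g.
	# {(12.30, 12.35): 3} would mean roughly 3 jiffies of cpu time in that
	# window (values illustrative).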
def createProcessUsageEvents(self):
# get an array of process names
proclist = []
for t in self.pstl:
pslist = self.pstl[t]
for ps in pslist:
if ps not in proclist:
proclist.append(ps)
# get a list of data points for suspend and resume
tsus = []
tres = []
for t in sorted(self.pstl):
if t < self.tSuspended:
tsus.append(t)
else:
tres.append(t)
# process the events for suspend and resume
if len(proclist) > 0:
vprint('Process Execution:')
for ps in proclist:
c = self.addProcessUsageEvent(ps, tsus)
if c > 0:
vprint('%25s (sus): %d' % (ps, c))
c = self.addProcessUsageEvent(ps, tres)
if c > 0:
vprint('%25s (res): %d' % (ps, c))
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
row = 0
count = 1
def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
self.name = name
self.args = args
self.caller = caller
self.ret = ret
self.time = start
self.length = end - start
self.end = end
self.ubiquitous = u
self.proc = proc
self.pid = pid
self.color = color
def title(self):
cnt = ''
if self.count > 1:
cnt = '(x%d)' % self.count
l = '%0.3fms' % (self.length * 1000)
if self.ubiquitous:
title = '%s(%s)%s <- %s, %s(%s)' % \
(self.name, self.args, cnt, self.caller, self.ret, l)
else:
title = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, cnt, l)
return title.replace('"', '')
def text(self):
if self.count > 1:
text = '%s(x%d)' % (self.name, self.count)
else:
text = self.name
return text
def repeat(self, tgt):
# is the tgt call just a repeat of this call (e.g. are we in a loop)
dt = self.time - tgt.end
# only combine calls if -all- attributes are identical
if tgt.caller == self.caller and \
tgt.name == self.name and tgt.args == self.args and \
tgt.proc == self.proc and tgt.pid == self.pid and \
tgt.ret == self.ret and dt >= 0 and \
dt <= sysvals.callloopmaxgap and \
self.length < sysvals.callloopmaxlen:
return True
return False
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
time = 0.0
length = 0.0
fcall = False
freturn = False
fevent = False
fkprobe = False
depth = 0
name = ''
type = ''
def __init__(self, t, m='', d=''):
self.time = float(t)
if not m and not d:
return
# is this a trace event
if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
if(d == 'traceevent'):
# nop format trace event
msg = m
else:
# function_graph format trace event
em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
msg = em.group('msg')
emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
if(emm):
self.name = emm.group('msg')
self.type = emm.group('call')
else:
self.name = msg
km = re.match('^(?P<n>.*)_cal$', self.type)
if km:
self.fcall = True
self.fkprobe = True
self.type = km.group('n')
return
km = re.match('^(?P<n>.*)_ret$', self.type)
if km:
self.freturn = True
self.fkprobe = True
self.type = km.group('n')
return
self.fevent = True
return
# convert the duration to seconds
if(d):
self.length = float(d)/1000000
# the indentation determines the depth
match = re.match('^(?P<d> *)(?P<o>.*)$', m)
if(not match):
return
self.depth = self.getDepth(match.group('d'))
m = match.group('o')
# function return
if(m[0] == '}'):
self.freturn = True
if(len(m) > 1):
# includes comment with function name
match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
if(match):
self.name = match.group('n').strip()
# function call
else:
self.fcall = True
# function call with children
if(m[-1] == '{'):
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# function call with no children (leaf)
elif(m[-1] == ';'):
self.freturn = True
match = re.match('^(?P<n>.*) *\(.*', m)
if(match):
self.name = match.group('n').strip()
# something else (possibly a trace marker)
else:
self.name = m
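	# Examples of how the constructor above classifies function_graph text,
	# i.e. the msg portion with the trace prefix already stripped off
	# (times and durations are illustrative):
	#   FTraceLine('1234.5678', 'dpm_run_callback() {', '')  -> fcall, name='dpm_run_callback'
	#   FTraceLine('1234.5690', '}', '10.5')                  -> freturn, length=10.5us
	#   FTraceLine('1234.5691', 'msleep();', '2.0')           -> fcall+freturn (leaf)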
def getDepth(self, str):
return len(str)/2
def debugPrint(self, dev=''):
if(self.freturn and self.fcall):
print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
elif(self.freturn):
print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
else:
print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \
self.depth, self.name, self.length*1000000))
def startMarker(self):
# Is this the starting line of a suspend?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name == 'SUSPEND START'):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('suspend_enter\[.*\] begin', self.name)):
return True
return False
def endMarker(self):
# Is this the ending line of a resume?
if not self.fevent:
return False
if sysvals.usetracemarkers:
if(self.name == 'RESUME COMPLETE'):
return True
return False
else:
if(self.type == 'suspend_resume' and
re.match('thaw_processes\[.*\] end', self.name)):
return True
return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
id = ''
start = -1.0
end = -1.0
list = []
invalid = False
depth = 0
pid = 0
name = ''
def __init__(self, pid):
self.start = -1.0
self.end = -1.0
self.list = []
self.depth = 0
self.pid = pid
def addLine(self, line, debug=False):
# if this is already invalid, just leave
if(self.invalid):
return False
# invalidate on too much data or bad depth
if(len(self.list) >= 1000000 or self.depth < 0):
self.invalidate(line)
return False
		# compare the current depth with this line's pre-call depth
prelinedep = line.depth
if(line.freturn and not line.fcall):
prelinedep += 1
last = 0
lasttime = line.time
virtualfname = 'execution_misalignment'
if len(self.list) > 0:
last = self.list[-1]
lasttime = last.time
# handle low misalignments by inserting returns
if prelinedep < self.depth:
if debug and last:
print '-------- task %d --------' % self.pid
last.debugPrint()
idx = 0
# add return calls to get the depth down
while prelinedep < self.depth:
if debug:
print 'MISALIGN LOW (add returns): C%d - eC%d' % (self.depth, prelinedep)
self.depth -= 1
if idx == 0 and last and last.fcall and not last.freturn:
# special case, turn last call into a leaf
last.depth = self.depth
last.freturn = True
last.length = line.time - last.time
if debug:
last.debugPrint()
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = virtualfname
vline.freturn = True
self.list.append(vline)
if debug:
vline.debugPrint()
idx += 1
if debug:
line.debugPrint()
print ''
# handle high misalignments by inserting calls
elif prelinedep > self.depth:
if debug and last:
print '-------- task %d --------' % self.pid
last.debugPrint()
idx = 0
# add calls to get the depth up
while prelinedep > self.depth:
if debug:
print 'MISALIGN HIGH (add calls): C%d - eC%d' % (self.depth, prelinedep)
if idx == 0 and line.freturn and not line.fcall:
# special case, turn this return into a leaf
line.fcall = True
prelinedep -= 1
else:
vline = FTraceLine(lasttime)
vline.depth = self.depth
vline.name = virtualfname
vline.fcall = True
if debug:
vline.debugPrint()
self.list.append(vline)
self.depth += 1
if not last:
self.start = vline.time
idx += 1
if debug:
line.debugPrint()
print ''
# process the call and set the new depth
if(line.fcall and not line.freturn):
self.depth += 1
elif(line.freturn and not line.fcall):
self.depth -= 1
if len(self.list) < 1:
self.start = line.time
self.list.append(line)
if(line.depth == 0 and line.freturn):
if(self.start < 0):
self.start = line.time
self.end = line.time
if line.fcall:
self.end += line.length
if self.list[0].name == virtualfname:
self.invalid = True
return True
return False
def invalidate(self, line):
if(len(self.list) > 0):
first = self.list[0]
self.list = []
self.list.append(first)
self.invalid = True
id = 'task %s' % (self.pid)
window = '(%f - %f)' % (self.start, line.time)
if(self.depth < 0):
vprint('Too much data for '+id+\
' (buffer overflow), ignoring this callback')
else:
vprint('Too much data for '+id+\
' '+window+', ignoring this callback')
def slice(self, t0, tN):
minicg = FTraceCallGraph(0)
count = -1
firstdepth = 0
for l in self.list:
if(l.time < t0 or l.time > tN):
continue
if(count < 0):
if(not l.fcall or l.name == 'dev_driver_string'):
continue
firstdepth = l.depth
count = 0
l.depth -= firstdepth
minicg.addLine(l)
if((count == 0 and l.freturn and l.fcall) or
(count > 0 and l.depth <= 0)):
break
count += 1
return minicg
def repair(self, enddepth):
# bring the depth back to 0 with additional returns
fixed = False
last = self.list[-1]
for i in reversed(range(enddepth)):
t = FTraceLine(last.time)
t.depth = i
t.freturn = True
fixed = self.addLine(t)
if fixed:
self.end = last.time
return True
return False
def postProcess(self, debug=False):
if len(self.list) > 0:
self.name = self.list[0].name
stack = dict()
cnt = 0
last = 0
for l in self.list:
# ftrace bug: reported duration is not reliable
# check each leaf and clip it at max possible length
if(last and last.freturn and last.fcall):
if last.length > l.time - last.time:
last.length = l.time - last.time
if(l.fcall and not l.freturn):
stack[l.depth] = l
cnt += 1
elif(l.freturn and not l.fcall):
if(l.depth not in stack):
if debug:
print 'Post Process Error: Depth missing'
l.debugPrint()
return False
# calculate call length from call/return lines
stack[l.depth].length = l.time - stack[l.depth].time
stack.pop(l.depth)
l.length = 0
cnt -= 1
last = l
if(cnt == 0):
# trace caught the whole call tree
return True
elif(cnt < 0):
if debug:
print 'Post Process Error: Depth is less than 0'
return False
# trace ended before call tree finished
return self.repair(cnt)
def deviceMatch(self, pid, data):
found = False
# add the callgraph data to the device hierarchy
borderphase = {
'dpm_prepare': 'suspend_prepare',
'dpm_complete': 'resume_complete'
}
if(self.name in borderphase):
p = borderphase[self.name]
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self.slice(dev['start'], dev['end'])
found = True
return found
for p in data.phases:
if(data.dmesg[p]['start'] <= self.start and
self.start <= data.dmesg[p]['end']):
list = data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
self.start <= dev['start'] and
self.end >= dev['end']):
dev['ftrace'] = self
found = True
break
break
return found
def newActionFromFunction(self, data):
name = self.name
if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
return
fs = self.start
fe = self.end
if fs < data.start or fe > data.end:
return
phase = ''
for p in data.phases:
if(data.dmesg[p]['start'] <= self.start and
self.start < data.dmesg[p]['end']):
phase = p
break
if not phase:
return
out = data.newActionGlobal(name, fs, fe, -2)
if out:
phase, myname = out
data.dmesg[phase]['list'][myname]['ftrace'] = self
def debugPrint(self):
		print('[%f - %f] %s (%d)' % (self.start, self.end, self.name, self.pid))
for l in self.list:
if(l.freturn and l.fcall):
print('%f (%02d): %s(); (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
elif(l.freturn):
print('%f (%02d): %s} (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
else:
print('%f (%02d): %s() { (%.3f us)' % (l.time, \
l.depth, l.name, l.length*1000000))
print(' ')
class DevItem:
def __init__(self, test, phase, dev):
self.test = test
self.phase = phase
self.dev = dev
def isa(self, cls):
if 'htmlclass' in self.dev and cls in self.dev['htmlclass']:
return True
return False
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
html = ''
height = 0 # total timeline height
scaleH = 20 # timescale (top) row height
rowH = 30 # device row height
bodyH = 0 # body height
rows = 0 # total timeline rows
rowlines = dict()
rowheight = dict()
html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
def __init__(self, rowheight, scaleheight):
self.rowH = rowheight
self.scaleH = scaleheight
self.html = ''
def createHeader(self, sv, suppress=''):
if(not sv.stamp['time']):
return
self.html += '<div class="version"><a href="https://01.org/suspendresume">%s v%s</a></div>' \
% (sv.title, sv.version)
if sv.logmsg and 'log' not in suppress:
self.html += '<button id="showtest" class="logbtn">log</button>'
if sv.addlogs and 'dmesg' not in suppress:
self.html += '<button id="showdmesg" class="logbtn">dmesg</button>'
if sv.addlogs and sv.ftracefile and 'ftrace' not in suppress:
self.html += '<button id="showftrace" class="logbtn">ftrace</button>'
headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
self.html += headline_stamp.format(sv.stamp['host'], sv.stamp['kernel'],
sv.stamp['mode'], sv.stamp['time'])
# Function: getDeviceRows
# Description:
	# determine how many rows the device funcs will take
# Arguments:
# rawlist: the list of devices/actions for a single phase
# Output:
# The total number of rows needed to display this phase of the timeline
def getDeviceRows(self, rawlist):
# clear all rows and set them to undefined
sortdict = dict()
for item in rawlist:
item.row = -1
sortdict[item] = item.length
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
remaining = len(sortlist)
rowdata = dict()
row = 1
# try to pack each row with as many ranges as possible
while(remaining > 0):
if(row not in rowdata):
rowdata[row] = []
for i in sortlist:
if(i.row >= 0):
continue
s = i.time
e = i.time + i.length
valid = True
for ritem in rowdata[row]:
rs = ritem.time
re = ritem.time + ritem.length
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(i)
i.row = row
remaining -= 1
row += 1
return row
# Function: getPhaseRows
# Description:
# Organize the timeline entries into the smallest
# number of rows possible, with no entry overlapping
# Arguments:
# devlist: the list of devices/actions in a group of contiguous phases
# Output:
# The total number of rows needed to display this phase of the timeline
def getPhaseRows(self, devlist, row=0):
# clear all rows and set them to undefined
remaining = len(devlist)
rowdata = dict()
sortdict = dict()
myphases = []
# initialize all device rows to -1 and calculate devrows
for item in devlist:
dev = item.dev
tp = (item.test, item.phase)
if tp not in myphases:
myphases.append(tp)
dev['row'] = -1
# sort by length 1st, then name 2nd
sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
if 'src' in dev:
dev['devrows'] = self.getDeviceRows(dev['src'])
# sort the devlist by length so that large items graph on top
sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
orderedlist = []
for item in sortlist:
if item.dev['pid'] == -2:
orderedlist.append(item)
for item in sortlist:
if item not in orderedlist:
orderedlist.append(item)
# try to pack each row with as many devices as possible
while(remaining > 0):
rowheight = 1
if(row not in rowdata):
rowdata[row] = []
for item in orderedlist:
dev = item.dev
if(dev['row'] < 0):
s = dev['start']
e = dev['end']
valid = True
for ritem in rowdata[row]:
rs = ritem.dev['start']
re = ritem.dev['end']
if(not (((s <= rs) and (e <= rs)) or
((s >= re) and (e >= re)))):
valid = False
break
if(valid):
rowdata[row].append(item)
dev['row'] = row
remaining -= 1
if 'devrows' in dev and dev['devrows'] > rowheight:
rowheight = dev['devrows']
for t, p in myphases:
if t not in self.rowlines or t not in self.rowheight:
self.rowlines[t] = dict()
self.rowheight[t] = dict()
if p not in self.rowlines[t] or p not in self.rowheight[t]:
self.rowlines[t][p] = dict()
self.rowheight[t][p] = dict()
rh = self.rowH
# section headers should use a different row height
if len(rowdata[row]) == 1 and \
'htmlclass' in rowdata[row][0].dev and \
'sec' in rowdata[row][0].dev['htmlclass']:
rh = 15
self.rowlines[t][p][row] = rowheight
self.rowheight[t][p][row] = rowheight * rh
row += 1
if(row > self.rows):
self.rows = int(row)
return row
def phaseRowHeight(self, test, phase, row):
return self.rowheight[test][phase][row]
def phaseRowTop(self, test, phase, row):
top = 0
for i in sorted(self.rowheight[test][phase]):
if i >= row:
break
top += self.rowheight[test][phase][i]
return top
def calcTotalRows(self):
# Calculate the heights and offsets for the header and rows
maxrows = 0
standardphases = []
for t in self.rowlines:
for p in self.rowlines[t]:
total = 0
for i in sorted(self.rowlines[t][p]):
total += self.rowlines[t][p][i]
if total > maxrows:
maxrows = total
if total == len(self.rowlines[t][p]):
standardphases.append((t, p))
self.height = self.scaleH + (maxrows*self.rowH)
self.bodyH = self.height - self.scaleH
# if there is 1 line per row, draw them the standard way
for t, p in standardphases:
for i in sorted(self.rowheight[t][p]):
self.rowheight[t][p][i] = self.bodyH/len(self.rowlines[t][p])
def createZoomBox(self, mode='command', testcount=1):
# Create bounding box, add buttons
html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
if mode != 'command':
if testcount > 1:
self.html += html_devlist2
self.html += html_devlist1.format('1')
else:
self.html += html_devlist1.format('')
self.html += html_zoombox
self.html += html_timeline.format('dmesg', self.height)
# Function: createTimeScale
# Description:
# Create the timescale for a timeline block
# Arguments:
# m0: start time (mode begin)
# mMax: end time (mode end)
# tTotal: total timeline time
# mode: suspend or resume
# Output:
# The html code needed to display the time scale
def createTimeScale(self, m0, mMax, tTotal, mode):
timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
output = '<div class="timescale">\n'
# set scale for timeline
mTotal = mMax - m0
tS = 0.1
if(tTotal <= 0):
return output+'</div>\n'
if(tTotal > 4):
tS = 1
divTotal = int(mTotal/tS) + 1
divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
for i in range(divTotal):
htmlline = ''
if(mode == 'suspend'):
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
if(i == divTotal - 1):
val = mode
htmlline = timescale.format(pos, val)
else:
pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
val = '%0.fms' % (float(i)*tS*1000)
htmlline = timescale.format(pos, val)
if(i == 0):
htmlline = rline.format(mode)
output += htmlline
self.html += output+'</div>\n'
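	# The scale above places a tick every 0.1s (every 1s when the total
	# timeline time exceeds 4s). Suspend blocks label ticks with negative
	# offsets counting up toward the right edge, resume blocks count up from
	# the left edge, and the edge tick itself carries the mode name.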
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
class TestProps:
stamp = ''
S0i3 = False
fwdata = []
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
ftrace_line_fmt_nop = \
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
'(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
ftrace_line_fmt = ftrace_line_fmt_nop
cgformat = False
data = 0
ktemp = dict()
def __init__(self):
self.ktemp = dict()
def setTracerType(self, tracer):
if(tracer == 'function_graph'):
self.cgformat = True
self.ftrace_line_fmt = self.ftrace_line_fmt_fg
elif(tracer == 'nop'):
self.ftrace_line_fmt = self.ftrace_line_fmt_nop
else:
doError('Invalid tracer format: [%s]' % tracer)
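	# The two ftrace_line_fmt patterns above expect lines roughly of the form
	# (examples are illustrative, whitespace approximate):
	#   nop:            bash-2504  [001] d...  1234.567890: tracing_mark_write: SUSPEND START
	#   function_graph:  1234.567890 |   1)  kworker-57  |  12.345 us  |  acpi_pm_prepare();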
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
ftemp = dict()
ttemp = dict()
data = 0
def __init__(self, dataobj):
self.data = dataobj
self.ftemp = dict()
self.ttemp = dict()
class ProcessMonitor:
proclist = dict()
running = False
def procstat(self):
c = ['cat /proc/[1-9]*/stat 2>/dev/null']
process = Popen(c, shell=True, stdout=PIPE)
running = dict()
for line in process.stdout:
data = line.split()
pid = data[0]
name = re.sub('[()]', '', data[1])
user = int(data[13])
kern = int(data[14])
kjiff = ujiff = 0
if pid not in self.proclist:
self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
else:
val = self.proclist[pid]
ujiff = user - val['user']
kjiff = kern - val['kern']
val['user'] = user
val['kern'] = kern
if ujiff > 0 or kjiff > 0:
running[pid] = ujiff + kjiff
process.wait()
out = ''
for pid in running:
jiffies = running[pid]
val = self.proclist[pid]
if out:
out += ','
out += '%s-%s %d' % (val['name'], pid, jiffies)
return 'ps - '+out
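	# The returned marker string looks roughly like
	# 'ps - Xorg-1025 3,kworker/u16:2-57 12' (process names and jiffy counts
	# are illustrative): one 'name-pid jiffies' entry per process that used
	# cpu time since the previous sample.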
def processMonitor(self, tid):
while self.running:
out = self.procstat()
if out:
sysvals.fsetVal(out, 'trace_marker')
def start(self):
self.thread = Thread(target=self.processMonitor, args=(0,))
self.running = True
self.thread.start()
def stop(self):
self.running = False
# ----------------- FUNCTIONS --------------------
# Function: vprint
# Description:
# verbose print (prints only with -verbose option)
# Arguments:
# msg: the debug/log message to print
def vprint(msg):
sysvals.logmsg += msg+'\n'
if(sysvals.verbose):
print(msg)
# Function: parseStamp
# Description:
# Pull in the stamp comment line from the data file(s),
# create the stamp, and add it to the global sysvals object
# Arguments:
#	 line: the raw stamp comment line from the log file
#	 data: the Data object that receives the parsed stamp
def parseStamp(line, data):
m = re.match(sysvals.stampfmt, line)
data.stamp = {'time': '', 'host': '', 'mode': ''}
dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
int(m.group('d')), int(m.group('H')), int(m.group('M')),
int(m.group('S')))
data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
data.stamp['host'] = m.group('host')
data.stamp['mode'] = m.group('mode')
data.stamp['kernel'] = m.group('kernel')
sysvals.hostname = data.stamp['host']
sysvals.suspendmode = data.stamp['mode']
if sysvals.suspendmode == 'command' and sysvals.ftracefile != '':
modes = ['on', 'freeze', 'standby', 'mem']
out = Popen(['grep', 'suspend_enter', sysvals.ftracefile],
stderr=PIPE, stdout=PIPE).stdout.read()
m = re.match('.* suspend_enter\[(?P<mode>.*)\]', out)
if m and m.group('mode') in ['1', '2', '3']:
sysvals.suspendmode = modes[int(m.group('mode'))]
data.stamp['mode'] = sysvals.suspendmode
if not sysvals.stamp:
sysvals.stamp = data.stamp
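# Illustrative stamp line and result, assuming the stamp format written by
# initTestOutput above (hostname and kernel version are hypothetical):
#   '# suspend-051217-173502 myhost mem 4.9.0' would yield
#   data.stamp = {'time': 'May 12 2017, 05:35:02 PM', 'host': 'myhost',
#                 'mode': 'mem', 'kernel': '4.9.0'}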
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has some or all of the trace events
# required for primary parsing. Set the usetraceevents and/or
# usetraceeventsonly flags in the global sysvals object
def doesTraceLogHaveTraceEvents():
# check for kprobes
sysvals.usekprobes = False
out = call('grep -q "_cal: (" '+sysvals.ftracefile, shell=True)
if(out == 0):
sysvals.usekprobes = True
# check for callgraph data on trace event blocks
out = call('grep -q "_cpu_down()" '+sysvals.ftracefile, shell=True)
if(out == 0):
sysvals.usekprobes = True
out = Popen(['head', '-1', sysvals.ftracefile],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
m = re.match(sysvals.stampfmt, out)
if m and m.group('mode') == 'command':
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = True
return
# figure out what level of trace events are supported
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
out = call('grep -q "'+e+': " '+sysvals.ftracefile, shell=True)
if(out != 0):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and out == 0):
sysvals.usetraceevents = True
	# determine if this log is properly formatted
for e in ['SUSPEND START', 'RESUME COMPLETE']:
out = call('grep -q "'+e+'" '+sysvals.ftracefile, shell=True)
if(out != 0):
sysvals.usetracemarkers = False
# Function: appendIncompleteTraceLog
# Description:
# [deprecated for kernel 3.15 or newer]
# Legacy support of ftrace outputs that lack the device_pm_callback
# and/or suspend_resume trace events. The primary data should be
# taken from dmesg, and this ftrace is used only for callgraph data
# or custom actions in the timeline. The data is appended to the Data
# objects provided.
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
# create TestRun vessels for ftrace parsing
testcnt = len(testruns)
testidx = 0
testrun = []
for data in testruns:
testrun.append(TestRun(data))
# extract the callgraph and traceevent data
vprint('Analyzing the ftrace data...')
tp = TestProps()
tf = open(sysvals.ftracefile, 'r')
data = 0
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# grab the time stamp
m = re.match(sysvals.stampfmt, line)
if(m):
tp.stamp = line
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
if(re.match(sysvals.devpropfmt, line)):
devProps(line)
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# look for the suspend start marker
if(t.startMarker()):
data = testrun[testidx].data
parseStamp(tp.stamp, data)
data.setStart(t.time)
continue
if(not data):
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time)
testidx += 1
if(testidx >= testcnt):
break
continue
# trace event processing
if(t.fevent):
# general trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# special processing for trace events
if re.match('dpm_prepare\[.*', name):
continue
elif re.match('machine_suspend.*', name):
continue
elif re.match('suspend_enter\[.*', name):
if(not isbegin):
data.dmesg['suspend_prepare']['end'] = t.time
continue
elif re.match('dpm_suspend\[.*', name):
if(not isbegin):
data.dmesg['suspend']['end'] = t.time
continue
elif re.match('dpm_suspend_late\[.*', name):
if(isbegin):
data.dmesg['suspend_late']['start'] = t.time
else:
data.dmesg['suspend_late']['end'] = t.time
continue
elif re.match('dpm_suspend_noirq\[.*', name):
if(isbegin):
data.dmesg['suspend_noirq']['start'] = t.time
else:
data.dmesg['suspend_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_noirq\[.*', name):
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
data.dmesg['resume_noirq']['start'] = t.time
else:
data.dmesg['resume_noirq']['end'] = t.time
continue
elif re.match('dpm_resume_early\[.*', name):
if(isbegin):
data.dmesg['resume_early']['start'] = t.time
else:
data.dmesg['resume_early']['end'] = t.time
continue
elif re.match('dpm_resume\[.*', name):
if(isbegin):
data.dmesg['resume']['start'] = t.time
else:
data.dmesg['resume']['end'] = t.time
continue
elif re.match('dpm_complete\[.*', name):
if(isbegin):
data.dmesg['resume_complete']['start'] = t.time
else:
data.dmesg['resume_complete']['end'] = t.time
continue
# skip trace events inside devices calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are simply graphed
if(isbegin):
# store each trace event in ttemp
if(name not in testrun[testidx].ttemp):
testrun[testidx].ttemp[name] = []
testrun[testidx].ttemp[name].append(\
{'begin': t.time, 'end': t.time})
else:
# finish off matching trace event in ttemp
if(name in testrun[testidx].ttemp):
testrun[testidx].ttemp[name][-1]['end'] = t.time
# call/return processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
if(pid not in testrun[testidx].ftemp):
testrun[testidx].ftemp[pid] = []
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid))
# when the call is finished, see which device matches it
cg = testrun[testidx].ftemp[pid][-1]
if(cg.addLine(t)):
testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid))
tf.close()
for test in testrun:
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
for name in test.ttemp:
for event in test.ttemp[name]:
test.data.newActionGlobal(name, event['begin'], event['end'])
# add the callgraph data to the device hierarchy
for pid in test.ftemp:
for cg in test.ftemp[pid]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
id = 'task %s cpu %s' % (pid, m.group('cpu'))
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
callstart = cg.start
callend = cg.end
for p in test.data.phases:
if(test.data.dmesg[p]['start'] <= callstart and
callstart <= test.data.dmesg[p]['end']):
list = test.data.dmesg[p]['list']
for devname in list:
dev = list[devname]
if(pid == dev['pid'] and
callstart <= dev['start'] and
callend >= dev['end']):
dev['ftrace'] = cg
break
test.data.printDetails()
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events.
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
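# Example (hypothetical usage sketch, not the tool's normal command-line
# driven flow; the filename below is illustrative only):
#   sysvals.ftracefile = 'suspend_ftrace.txt'  # assumed to already exist
#   testruns = parseTraceLog()
#   for data in testruns:
#       vprint('test %d: %f to %f' % (data.testnumber, data.start, data.end))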
def parseTraceLog():
vprint('Analyzing the ftrace data...')
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
sysvals.setupAllKprobes()
tracewatch = []
if sysvals.usekprobes:
tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON', 'CPU_OFF']
# extract the callgraph and traceevent data
tp = TestProps()
testruns = []
testdata = []
testrun = 0
data = 0
tf = open(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
for line in tf:
# remove any latent carriage returns
line = line.replace('\r\n', '')
# stamp line: each stamp means a new test run
m = re.match(sysvals.stampfmt, line)
if(m):
tp.stamp = line
continue
# firmware line: pull out any firmware data
m = re.match(sysvals.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
# tracer type line: determine the trace data type
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# device properties line
if(re.match(sysvals.devpropfmt, line)):
devProps(line)
continue
# ignore all other commented lines
if line[0] == '#':
continue
# ftrace line: parse only valid lines
m = re.match(tp.ftrace_line_fmt, line)
if(not m):
continue
# gather the basic message data from the line
m_time = m.group('time')
m_proc = m.group('proc')
m_pid = m.group('pid')
m_msg = m.group('msg')
if(tp.cgformat):
m_param3 = m.group('dur')
else:
m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
else:
continue
# the line should be a call, return, or event
if(not t.fcall and not t.freturn and not t.fevent):
continue
# find the start of suspend
if(t.startMarker()):
phase = 'suspend_prepare'
data = Data(len(testdata))
testdata.append(data)
testrun = TestRun(data)
testruns.append(testrun)
parseStamp(tp.stamp, data)
data.setStart(t.time)
data.tKernSus = t.time
continue
if(not data):
continue
# process cpu exec line
if t.type == 'tracing_mark_write':
m = re.match(sysvals.procexecfmt, t.name)
if(m):
proclist = dict()
for ps in m.group('ps').split(','):
val = ps.split()
if not val:
continue
name = val[0].replace('--', '-')
proclist[name] = int(val[1])
data.pstl[t.time] = proclist
continue
# find the end of resume
if(t.endMarker()):
data.setEnd(t.time)
if data.tKernRes == 0.0:
data.tKernRes = t.time
if data.dmesg['resume_complete']['end'] < 0:
data.dmesg['resume_complete']['end'] = t.time
if sysvals.suspendmode == 'mem' and len(tp.fwdata) > data.testnumber:
data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
if(data.tSuspended != 0 and data.tResumed != 0 and \
(data.fwSuspend > 0 or data.fwResume > 0)):
data.fwValid = True
if(not sysvals.usetracemarkers):
# no trace markers? then quit and be sure to finish recording
# the event we used to trigger resume end
if(len(testrun.ttemp['thaw_processes']) > 0):
# if an entry exists, assume this is its end
testrun.ttemp['thaw_processes'][-1]['end'] = t.time
break
continue
# trace event processing
if(t.fevent):
if(phase == 'post_resume'):
data.setEnd(t.time)
if(t.type == 'suspend_resume'):
# suspend_resume trace events have two types, begin and end
if(re.match('(?P<name>.*) begin$', t.name)):
isbegin = True
elif(re.match('(?P<name>.*) end$', t.name)):
isbegin = False
else:
continue
m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name)
if(m):
val = m.group('val')
if val == '0':
name = m.group('name')
else:
name = m.group('name')+'['+val+']'
else:
m = re.match('(?P<name>.*) .*', t.name)
name = m.group('name')
# ignore these events
if(name.split('[')[0] in tracewatch):
continue
# -- phase changes --
# start of kernel suspend
if(re.match('suspend_enter\[.*', t.name)):
if(isbegin):
data.dmesg[phase]['start'] = t.time
data.tKernSus = t.time
continue
# suspend_prepare start
elif(re.match('dpm_prepare\[.*', t.name)):
phase = 'suspend_prepare'
if(not isbegin):
data.dmesg[phase]['end'] = t.time
continue
# suspend start
elif(re.match('dpm_suspend\[.*', t.name)):
phase = 'suspend'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_late start
elif(re.match('dpm_suspend_late\[.*', t.name)):
phase = 'suspend_late'
data.setPhase(phase, t.time, isbegin)
continue
# suspend_noirq start
elif(re.match('dpm_suspend_noirq\[.*', t.name)):
phase = 'suspend_noirq'
data.setPhase(phase, t.time, isbegin)
if(not isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['start'] = t.time
continue
# suspend_machine/resume_machine
elif(re.match('machine_suspend\[.*', t.name)):
if(isbegin):
phase = 'suspend_machine'
data.dmesg[phase]['end'] = t.time
data.tSuspended = t.time
else:
if(sysvals.suspendmode in ['mem', 'disk'] and not tp.S0i3):
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
phase = 'resume_machine'
data.dmesg[phase]['start'] = t.time
data.tResumed = t.time
data.tLow = data.tResumed - data.tSuspended
continue
# acpi_suspend
elif(re.match('acpi_suspend\[.*', t.name)):
# acpi_suspend[0] S0i3
if(re.match('acpi_suspend\[0\] begin', t.name)):
if(sysvals.suspendmode == 'mem'):
tp.S0i3 = True
data.dmesg['suspend_machine']['end'] = t.time
data.tSuspended = t.time
continue
# resume_noirq start
elif(re.match('dpm_resume_noirq\[.*', t.name)):
phase = 'resume_noirq'
data.setPhase(phase, t.time, isbegin)
if(isbegin):
data.dmesg['resume_machine']['end'] = t.time
continue
# resume_early start
elif(re.match('dpm_resume_early\[.*', t.name)):
phase = 'resume_early'
data.setPhase(phase, t.time, isbegin)
continue
# resume start
elif(re.match('dpm_resume\[.*', t.name)):
phase = 'resume'
data.setPhase(phase, t.time, isbegin)
continue
# resume complete start
elif(re.match('dpm_complete\[.*', t.name)):
phase = 'resume_complete'
if(isbegin):
data.dmesg[phase]['start'] = t.time
continue
# skip trace events inside device calls
if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
continue
# global events (outside device calls) are graphed
if(name not in testrun.ttemp):
testrun.ttemp[name] = []
if(isbegin):
# create a new list entry
testrun.ttemp[name].append(\
{'begin': t.time, 'end': t.time, 'pid': pid})
else:
if(len(testrun.ttemp[name]) > 0):
# if an entry exists, assume this is its end
testrun.ttemp[name][-1]['end'] = t.time
elif(phase == 'post_resume'):
# post resume events can just have ends
testrun.ttemp[name].append({
'begin': data.dmesg[phase]['start'],
'end': t.time})
# device callback start
elif(t.type == 'device_pm_callback_start'):
m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
t.name)
if(not m):
continue
drv = m.group('drv')
n = m.group('d')
p = m.group('p')
if(n and p):
data.newAction(phase, n, pid, p, t.time, -1, drv)
if pid not in data.devpids:
data.devpids.append(pid)
# device callback finish
elif(t.type == 'device_pm_callback_end'):
m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name)
if(not m):
continue
n = m.group('d')
list = data.dmesg[phase]['list']
if(n in list):
dev = list[n]
dev['length'] = t.time - dev['start']
dev['end'] = t.time
# kprobe event processing
elif(t.fkprobe):
kprobename = t.type
kprobedata = t.name
key = (kprobename, pid)
# displayname is generated from kprobe data
displayname = ''
if(t.fcall):
displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
if not displayname:
continue
if(key not in tp.ktemp):
tp.ktemp[key] = []
tp.ktemp[key].append({
'pid': pid,
'begin': t.time,
'end': t.time,
'name': displayname,
'cdata': kprobedata,
'proc': m_proc,
})
elif(t.freturn):
if(key not in tp.ktemp) or len(tp.ktemp[key]) < 1:
continue
e = tp.ktemp[key][-1]
if e['begin'] < 0.0 or t.time - e['begin'] < 0.000001:
tp.ktemp[key].pop()
else:
e['end'] = t.time
e['rdata'] = kprobedata
# end of kernel resume
if(kprobename == 'pm_notifier_call_chain' or \
kprobename == 'pm_restore_console'):
data.dmesg[phase]['end'] = t.time
data.tKernRes = t.time
# callgraph processing
elif sysvals.usecallgraph:
# create a callgraph object for the data
key = (m_proc, pid)
if(key not in testrun.ftemp):
testrun.ftemp[key] = []
testrun.ftemp[key].append(FTraceCallGraph(pid))
# when the call is finished, see which device matches it
cg = testrun.ftemp[key][-1]
if(cg.addLine(t)):
testrun.ftemp[key].append(FTraceCallGraph(pid))
tf.close()
if sysvals.suspendmode == 'command':
for test in testruns:
for p in test.data.phases:
if p == 'suspend_prepare':
test.data.dmesg[p]['start'] = test.data.start
test.data.dmesg[p]['end'] = test.data.end
else:
test.data.dmesg[p]['start'] = test.data.end
test.data.dmesg[p]['end'] = test.data.end
test.data.tSuspended = test.data.end
test.data.tResumed = test.data.end
test.data.tLow = 0
test.data.fwValid = False
# dev source and procmon events can be unreadable with mixed phase height
if sysvals.usedevsrc or sysvals.useprocmon:
sysvals.mixedphaseheight = False
for i in range(len(testruns)):
test = testruns[i]
data = test.data
# find the total time range for this test (begin, end)
tlb, tle = data.start, data.end
if i < len(testruns) - 1:
tle = testruns[i+1].data.start
# add the process usage data to the timeline
if sysvals.useprocmon:
data.createProcessUsageEvents()
# add the traceevent data to the device hierarchy
if(sysvals.usetraceevents):
# add actual trace funcs
for name in test.ttemp:
for event in test.ttemp[name]:
data.newActionGlobal(name, event['begin'], event['end'], event['pid'])
# add the kprobe based virtual tracefuncs as actual devices
for key in tp.ktemp:
name, pid = key
if name not in sysvals.tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if kb == ke or tlb > kb or tle <= kb:
continue
color = sysvals.kprobeColor(name)
data.newActionGlobal(e['name'], kb, ke, pid, color)
# add config base kprobes and dev kprobes
if sysvals.usedevsrc:
for key in tp.ktemp:
name, pid = key
if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
continue
for e in tp.ktemp[key]:
kb, ke = e['begin'], e['end']
if kb == ke or tlb > kb or tle <= kb:
continue
data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
ke, e['cdata'], e['rdata'])
if sysvals.usecallgraph:
# add the callgraph data to the device hierarchy
sortlist = dict()
for key in test.ftemp:
proc, pid = key
for cg in test.ftemp[key]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
id = 'task %s' % (pid)
vprint('Sanity check failed for '+\
id+', ignoring this callback')
continue
# match cg data to devices
if sysvals.suspendmode == 'command' or not cg.deviceMatch(pid, data):
sortkey = '%f%f%d' % (cg.start, cg.end, pid)
sortlist[sortkey] = cg
# create blocks for orphan cg data
for sortkey in sorted(sortlist):
cg = sortlist[sortkey]
name = cg.name
if sysvals.isCallgraphFunc(name):
vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
cg.newActionFromFunction(data)
if sysvals.suspendmode == 'command':
for data in testdata:
data.printDetails()
return testdata
# fill in any missing phases
for data in testdata:
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
vprint('WARNING: phase "%s" is missing!' % p)
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
if(p != lp and not ('machine' in p and 'machine' in lp)):
data.dmesg[lp]['end'] = data.dmesg[p]['start']
lp = p
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
if sysvals.usedevsrc:
data.optimizeDevSrc()
data.printDetails()
# x2: merge any overlapping devices between test runs
if sysvals.usedevsrc and len(testdata) > 1:
tc = len(testdata)
for i in range(tc - 1):
devlist = testdata[i].overflowDevices()
for j in range(i + 1, tc):
testdata[j].mergeOverlapDevices(devlist)
testdata[0].stitchTouchingThreads(testdata[1:])
return testdata
# Function: loadKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Load the dmesg file into memory and fix up any ordering issues
# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
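# Example (hypothetical usage sketch; the filename is illustrative only):
#   sysvals.dmesgfile = 'suspend_dmesg.txt'  # assumed to already exist
#   testruns = loadKernelLog()               # Data objects with dmesgtext only
#   rawtext = loadKernelLog(justtext=True)   # or just the raw dmesg lines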
def loadKernelLog(justtext=False):
vprint('Analyzing the dmesg data...')
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
if justtext:
dmesgtext = []
# there can be multiple test runs in a single file
tp = TestProps()
tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
testruns = []
data = 0
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match(sysvals.stampfmt, line)
if(m):
tp.stamp = line
continue
m = re.match(sysvals.firmwarefmt, line)
if(m):
tp.fwdata.append((int(m.group('s')), int(m.group('r'))))
continue
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
msg = m.group("msg")
if justtext:
dmesgtext.append(line)
continue
if(re.match('PM: Syncing filesystems.*', msg)):
if(data):
testruns.append(data)
data = Data(len(testruns))
parseStamp(tp.stamp, data)
if len(tp.fwdata) > data.testnumber:
data.fwSuspend, data.fwResume = tp.fwdata[data.testnumber]
if(data.fwSuspend > 0 or data.fwResume > 0):
data.fwValid = True
if(not data):
continue
m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
if(m):
sysvals.stamp['kernel'] = m.group('k')
m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
if(m):
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
lf.close()
if justtext:
return dmesgtext
if data:
testruns.append(data)
if len(testruns) < 1:
doError(' dmesg log has no suspend/resume data: %s' \
% sysvals.dmesgfile)
# fix lines where a call and its return share the same timestamp and
# function but appear in swapped order
for data in testruns:
last = ''
for line in data.dmesgtext:
mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ .*, parent: .*', line)
mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
if(mc and mr and (mc.group('t') == mr.group('t')) and
(mc.group('f') == mr.group('f'))):
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
data.dmesgtext[j] = last
last = line
return testruns
# Function: parseKernelLog
# Description:
# [deprecated for kernel 3.15.0 or newer]
# Analyze a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file.
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callback trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
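# Example (hypothetical sketch of the legacy dmesg-only path):
#   testruns = loadKernelLog()
#   for data in testruns:
#       parseKernelLog(data)
#   createHTML(testruns)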
def parseKernelLog(data):
phase = 'suspend_runtime'
if(data.fwValid):
vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
(data.fwSuspend, data.fwResume))
# dmesg phase match table
dm = {
'suspend_prepare': 'PM: Syncing filesystems.*',
'suspend': 'PM: Entering [a-z]* sleep.*',
'suspend_late': 'PM: suspend of devices complete after.*',
'suspend_noirq': 'PM: late suspend of devices complete after.*',
'suspend_machine': 'PM: noirq suspend of devices complete after.*',
'resume_machine': 'ACPI: Low-level resume complete.*',
'resume_noirq': 'ACPI: Waking up from system sleep state.*',
'resume_early': 'PM: noirq resume of devices complete after.*',
'resume': 'PM: early resume of devices complete after.*',
'resume_complete': 'PM: resume of devices complete after.*',
'post_resume': '.*Restarting tasks \.\.\..*',
}
if(sysvals.suspendmode == 'standby'):
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
elif(sysvals.suspendmode == 'disk'):
dm['suspend_late'] = 'PM: freeze of devices complete after.*'
dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*'
dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*'
dm['resume_machine'] = 'PM: Restoring platform NVS memory'
dm['resume_early'] = 'PM: noirq restore of devices complete after.*'
dm['resume'] = 'PM: early restore of devices complete after.*'
dm['resume_complete'] = 'PM: restore of devices complete after.*'
elif(sysvals.suspendmode == 'freeze'):
dm['resume_machine'] = 'ACPI: resume from mwait'
# action table (expected events that occur and show up in dmesg)
at = {
'sync_filesystems': {
'smsg': 'PM: Syncing filesystems.*',
'emsg': 'PM: Preparing system for mem sleep.*' },
'freeze_user_processes': {
'smsg': 'Freezing user space processes .*',
'emsg': 'Freezing remaining freezable tasks.*' },
'freeze_tasks': {
'smsg': 'Freezing remaining freezable tasks.*',
'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
'ACPI prepare': {
'smsg': 'ACPI: Preparing to enter system sleep state.*',
'emsg': 'PM: Saving platform NVS memory.*' },
'PM nvs': {
'smsg': 'PM: Saving platform NVS memory.*',
'emsg': 'Disabling non-boot CPUs .*' },
}
t0 = -1.0
cpu_start = -1.0
prevktime = -1.0
actions = dict()
for line in data.dmesgtext:
# parse each dmesg line into the time and message
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
val = m.group('ktime')
try:
ktime = float(val)
except:
continue
msg = m.group('msg')
# initialize data start to first line time
if t0 < 0:
data.setStart(ktime)
t0 = ktime
else:
continue
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_prepare start
if(re.match(dm['suspend_prepare'], msg)):
phase = 'suspend_prepare'
data.dmesg[phase]['start'] = ktime
data.setStart(ktime)
data.tKernSus = ktime
# suspend start
elif(re.match(dm['suspend'], msg)):
data.dmesg['suspend_prepare']['end'] = ktime
phase = 'suspend'
data.dmesg[phase]['start'] = ktime
# suspend_late start
elif(re.match(dm['suspend_late'], msg)):
data.dmesg['suspend']['end'] = ktime
phase = 'suspend_late'
data.dmesg[phase]['start'] = ktime
# suspend_noirq start
elif(re.match(dm['suspend_noirq'], msg)):
data.dmesg['suspend_late']['end'] = ktime
phase = 'suspend_noirq'
data.dmesg[phase]['start'] = ktime
# suspend_machine start
elif(re.match(dm['suspend_machine'], msg)):
data.dmesg['suspend_noirq']['end'] = ktime
phase = 'suspend_machine'
data.dmesg[phase]['start'] = ktime
# resume_machine start
elif(re.match(dm['resume_machine'], msg)):
if(sysvals.suspendmode in ['freeze', 'standby']):
data.tSuspended = prevktime
data.dmesg['suspend_machine']['end'] = prevktime
else:
data.tSuspended = ktime
data.dmesg['suspend_machine']['end'] = ktime
phase = 'resume_machine'
data.tResumed = ktime
data.tLow = data.tResumed - data.tSuspended
data.dmesg[phase]['start'] = ktime
# resume_noirq start
elif(re.match(dm['resume_noirq'], msg)):
data.dmesg['resume_machine']['end'] = ktime
phase = 'resume_noirq'
data.dmesg[phase]['start'] = ktime
# resume_early start
elif(re.match(dm['resume_early'], msg)):
data.dmesg['resume_noirq']['end'] = ktime
phase = 'resume_early'
data.dmesg[phase]['start'] = ktime
# resume start
elif(re.match(dm['resume'], msg)):
data.dmesg['resume_early']['end'] = ktime
phase = 'resume'
data.dmesg[phase]['start'] = ktime
# resume complete start
elif(re.match(dm['resume_complete'], msg)):
data.dmesg['resume']['end'] = ktime
phase = 'resume_complete'
data.dmesg[phase]['start'] = ktime
# post resume start
elif(re.match(dm['post_resume'], msg)):
data.dmesg['resume_complete']['end'] = ktime
data.setEnd(ktime)
data.tKernRes = ktime
break
# -- device callbacks --
if(phase in data.phases):
# device init call
if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
sm = re.match('calling (?P<f>.*)\+ @ '+\
'(?P<n>.*), parent: (?P<p>.*)', msg)
f = sm.group('f')
n = sm.group('n')
p = sm.group('p')
if(f and n and p):
data.newAction(phase, f, int(n), p, ktime, -1, '')
# device init return
elif(re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs', msg)):
sm = re.match('call (?P<f>.*)\+ returned .* after '+\
'(?P<t>.*) usecs(?P<a>.*)', msg)
f = sm.group('f')
t = sm.group('t')
list = data.dmesg[phase]['list']
if(f in list):
dev = list[f]
dev['length'] = int(t)
dev['end'] = ktime
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
# look for known actions
for a in at:
if(re.match(at[a]['smsg'], msg)):
if(a not in actions):
actions[a] = []
actions[a].append({'begin': ktime, 'end': ktime})
if(re.match(at[a]['emsg'], msg)):
if(a in actions):
actions[a][-1]['end'] = ktime
# now look for CPU on/off events
if(re.match('Disabling non-boot CPUs .*', msg)):
# start of first cpu suspend
cpu_start = ktime
elif(re.match('Enabling non-boot CPUs .*', msg)):
# start of first cpu resume
cpu_start = ktime
elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
# end of a cpu suspend, start of the next
m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
# end of a cpu resume, start of the next
m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
cpu = 'CPU'+m.group('cpu')
if(cpu not in actions):
actions[cpu] = []
actions[cpu].append({'begin': cpu_start, 'end': ktime})
cpu_start = ktime
prevktime = ktime
# fill in any missing phases
lp = data.phases[0]
for p in data.phases:
if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0):
print('WARNING: phase "%s" is missing, something went wrong!' % p)
print(' In %s, this dmesg line denotes the start of %s:' % \
(sysvals.suspendmode, p))
print(' "%s"' % dm[p])
if(data.dmesg[p]['start'] < 0):
data.dmesg[p]['start'] = data.dmesg[lp]['end']
if(p == 'resume_machine'):
data.tSuspended = data.dmesg[lp]['end']
data.tResumed = data.dmesg[lp]['end']
data.tLow = 0
if(data.dmesg[p]['end'] < 0):
data.dmesg[p]['end'] = data.dmesg[p]['start']
lp = p
# fill in any actions we've found
for name in actions:
for event in actions[name]:
data.newActionGlobal(name, event['begin'], event['end'])
data.printDetails()
if(len(sysvals.devicefilter) > 0):
data.deviceFilter(sysvals.devicefilter)
data.fixupInitcallsThatDidntReturn()
return True
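# Function: callgraphHTML
# Description:
# Write a single device callgraph to the html file as a set of nested,
# expandable <article> elements, one per function call
# Output:
# The updated element count (num), used to give each article a unique id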
def callgraphHTML(sv, hf, num, cg, title, color, devid):
html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
cgid = devid
if cg.id:
cgid += cg.id
cglen = (cg.end - cg.start) * 1000
if cglen < sv.mincglen:
return num
fmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
flen = fmt % (cglen, cg.start, cg.end)
hf.write(html_func_top.format(cgid, color, num, title, flen))
num += 1
for line in cg.list:
if(line.length < 0.000000001):
flen = ''
else:
fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
flen = fmt % (line.length*1000, line.time)
if(line.freturn and line.fcall):
hf.write(html_func_leaf.format(line.name, flen))
elif(line.freturn):
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
return num
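# Function: addCallgraphs
# Description:
# Write the callgraph section of the html file by converting the ftrace
# callgraph data attached to each device (via callgraphHTML) into html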
def addCallgraphs(sv, hf, data):
hf.write('<section id="callgraphs" class="callgraph">\n')
# write out the ftrace data converted to html
num = 0
for p in data.phases:
if sv.cgphase and p != sv.cgphase:
continue
list = data.dmesg[p]['list']
for devname in data.sortedDevices(p):
dev = list[devname]
color = 'white'
if 'color' in data.dmesg[p]:
color = data.dmesg[p]['color']
if 'color' in dev:
color = dev['color']
name = devname
if(devname in sv.devprops):
name = sv.devprops[devname].altName(devname)
if sv.suspendmode in suspendmodename:
name += ' '+p
if('ftrace' in dev):
cg = dev['ftrace']
num = callgraphHTML(sv, hf, num, cg,
name, color, dev['id'])
if('ftraces' in dev):
for cg in dev['ftraces']:
num = callgraphHTML(sv, hf, num, cg,
name+' &rarr; '+cg.name, color, dev['id'])
hf.write('\n\n </section>\n')
# Function: createHTMLSummarySimple
# Description:
# Create a summary html file for a series of tests
# Arguments:
# testruns: array of dicts summarizing each test, with 'mode', 'host',
# 'kernel', 'time', 'suspend', 'resume', and 'url' entries
# htmlfile: the output html filename
# folder: the test folder name shown in the summary header
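# Example (hypothetical input sketch; all field values are illustrative only):
#   tests = [{'mode': 'mem', 'host': 'myhost', 'kernel': '4.9.0-test',
#             'time': '2017/01/01 12:00:00', 'suspend': '150.0',
#             'resume': '500.0', 'url': 'suspend-010117/output.html'}]
#   createHTMLSummarySimple(tests, 'summary.html', 'suspend-010117')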
def createHTMLSummarySimple(testruns, htmlfile, folder):
# write the html header first (html head, css code, up to body start)
html = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>SleepGraph Summary</title>\n\
<style type=\'text/css\'>\n\
.stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
table {width:100%;border-collapse: collapse;}\n\
.summary {border:1px solid;}\n\
th {border: 1px solid black;background:#222;color:white;}\n\
td {font: 16px "Times New Roman";text-align: center;}\n\
tr.alt td {background:#ddd;}\n\
tr.avg td {background:#aaa;}\n\
</style>\n</head>\n<body>\n'
# group test header
html += '<div class="stamp">%s (%d tests)</div>\n' % (folder, len(testruns))
th = '\t<th>{0}</th>\n'
td = '\t<td>{0}</td>\n'
tdlink = '\t<td><a href="{0}">html</a></td>\n'
# table header
html += '<table class="summary">\n<tr>\n' + th.format('#') +\
th.format('Mode') + th.format('Host') + th.format('Kernel') +\
th.format('Test Time') + th.format('Suspend') + th.format('Resume') +\
th.format('Detail') + '</tr>\n'
# test data, 1 row per test
avg = '<tr class="avg"><td></td><td></td><td></td><td></td>'+\
'<td>Average of {0} {1} tests</td><td>{2}</td><td>{3}</td><td></td></tr>\n'
sTimeAvg = rTimeAvg = 0.0
mode = ''
num = 0
for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'])):
if mode != data['mode']:
# test average line
if(num > 0):
sTimeAvg /= (num - 1)
rTimeAvg /= (num - 1)
html += avg.format('%d' % (num - 1), mode,
'%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg)
sTimeAvg = rTimeAvg = 0.0
mode = data['mode']
num = 1
# alternate row color
if num % 2 == 1:
html += '<tr class="alt">\n'
else:
html += '<tr>\n'
html += td.format("%d" % num)
num += 1
# basic info
for item in ['mode', 'host', 'kernel', 'time']:
val = "unknown"
if(item in data):
val = data[item]
html += td.format(val)
# suspend time
sTime = float(data['suspend'])
sTimeAvg += sTime
html += td.format('%.3f ms' % sTime)
# resume time
rTime = float(data['resume'])
rTimeAvg += rTime
html += td.format('%.3f ms' % rTime)
# link to the output html
html += tdlink.format(data['url']) + '</tr>\n'
# last test average line
if(num > 0):
sTimeAvg /= (num - 1)
rTimeAvg /= (num - 1)
html += avg.format('%d' % (num - 1), mode,
'%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg)
# flush the data to file
hf = open(htmlfile, 'w')
hf.write(html+'</table>\n</body>\n</html>\n')
hf.close()
def ordinal(value):
suffix = 'th'
if value < 10 or value > 19:
if value % 10 == 1:
suffix = 'st'
elif value % 10 == 2:
suffix = 'nd'
elif value % 10 == 3:
suffix = 'rd'
return '%d%s' % (value, suffix)
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
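# Example (hypothetical sketch, assuming sysvals.htmlfile has been set and
# testruns was produced by parseTraceLog or loadKernelLog/parseKernelLog):
#   createHTML(testruns)    # writes the timeline html to sysvals.htmlfile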
def createHTML(testruns):
if len(testruns) < 1:
print('ERROR: Not enough test data to build a timeline')
return
kerror = False
for data in testruns:
if data.kerror:
kerror = True
data.normalizeTime(testruns[-1].tSuspended)
# html function templates
html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">ERROR&rarr;</div>\n'
html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}"> {2}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal2 = '<table class="time1">\n<tr>'\
'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
'</tr>\n</table>\n'
html_timetotal3 = '<table class="time1">\n<tr>'\
'<td class="green">Execution Time: <b>{0} ms</b></td>'\
'<td class="yellow">Command: <b>{1}</b></td>'\
'</tr>\n</table>\n'
html_timegroups = '<table class="time2">\n<tr>'\
'<td class="green" title="time from kernel enter_state({5}) to firmware mode [kernel time only]">{4}Kernel Suspend: {0} ms</td>'\
'<td class="purple">{4}Firmware Suspend: {1} ms</td>'\
'<td class="purple">{4}Firmware Resume: {2} ms</td>'\
'<td class="yellow" title="time from firmware mode to return from kernel enter_state({5}) [kernel time only]">{4}Kernel Resume: {3} ms</td>'\
'</tr>\n</table>\n'
# html format variables
scaleH = 20
if kerror:
scaleH = 40
# device timeline
vprint('Creating Device Timeline...')
devtl = Timeline(30, scaleH)
# write the test title and general info header
devtl.createHeader(sysvals)
# Generate the header for this timeline
for data in testruns:
tTotal = data.end - data.start
sktime = (data.dmesg['suspend_machine']['end'] - \
data.tKernSus) * 1000
rktime = (data.dmesg['resume_complete']['end'] - \
data.dmesg['resume_machine']['start']) * 1000
if(tTotal == 0):
print('ERROR: No timeline data')
sys.exit()
if(data.tLow > 0):
low_time = '%.0f'%(data.tLow*1000)
if sysvals.suspendmode == 'command':
run_time = '%.0f'%((data.end-data.start)*1000)
if sysvals.testcommand:
testdesc = sysvals.testcommand
else:
testdesc = 'unknown'
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
thtml = html_timetotal3.format(run_time, testdesc)
devtl.html += thtml
elif data.fwValid:
suspend_time = '%.0f'%(sktime + (data.fwSuspend/1000000.0))
resume_time = '%.0f'%(rktime + (data.fwResume/1000000.0))
testdesc1 = 'Total'
testdesc2 = ''
stitle = 'time from kernel enter_state(%s) to low-power mode [kernel & firmware time]' % sysvals.suspendmode
rtitle = 'time from low-power mode to return from kernel enter_state(%s) [firmware & kernel time]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc1 = testdesc2 = ordinal(data.testnumber+1)
testdesc2 += ' '
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc1, stitle, rtitle)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc1, stitle, rtitle)
devtl.html += thtml
sftime = '%.3f'%(data.fwSuspend / 1000000.0)
rftime = '%.3f'%(data.fwResume / 1000000.0)
devtl.html += html_timegroups.format('%.3f'%sktime, \
sftime, rftime, '%.3f'%rktime, testdesc2, sysvals.suspendmode)
else:
suspend_time = '%.3f' % sktime
resume_time = '%.3f' % rktime
testdesc = 'Kernel'
stitle = 'time from kernel enter_state(%s) to firmware mode [kernel time only]' % sysvals.suspendmode
rtitle = 'time from firmware mode to return from kernel enter_state(%s) [kernel time only]' % sysvals.suspendmode
if(len(testruns) > 1):
testdesc = ordinal(data.testnumber+1)+' '+testdesc
if(data.tLow == 0):
thtml = html_timetotal.format(suspend_time, \
resume_time, testdesc, stitle, rtitle)
else:
thtml = html_timetotal2.format(suspend_time, low_time, \
resume_time, testdesc, stitle, rtitle)
devtl.html += thtml
# time scale for potentially multiple datasets
t0 = testruns[0].start
tMax = testruns[-1].end
tTotal = tMax - t0
# determine the maximum number of rows we need to draw
fulllist = []
threadlist = []
pscnt = 0
devcnt = 0
for data in testruns:
data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
for group in data.devicegroups:
devlist = []
for phase in group:
for devname in data.tdevlist[phase]:
d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
devlist.append(d)
if d.isa('kth'):
threadlist.append(d)
else:
if d.isa('ps'):
pscnt += 1
else:
devcnt += 1
fulllist.append(d)
if sysvals.mixedphaseheight:
devtl.getPhaseRows(devlist)
if not sysvals.mixedphaseheight:
if len(threadlist) > 0 and len(fulllist) > 0:
if pscnt > 0 and devcnt > 0:
msg = 'user processes & device pm callbacks'
elif pscnt > 0:
msg = 'user processes'
else:
msg = 'device pm callbacks'
d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
fulllist.insert(0, d)
devtl.getPhaseRows(fulllist)
if len(threadlist) > 0:
d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
threadlist.insert(0, d)
devtl.getPhaseRows(threadlist, devtl.rows)
devtl.calcTotalRows()
# draw the full timeline
devtl.createZoomBox(sysvals.suspendmode, len(testruns))
phases = {'suspend':[],'resume':[]}
for phase in data.dmesg:
if 'resume' in phase:
phases['resume'].append(phase)
else:
phases['suspend'].append(phase)
# draw each test run chronologically
for data in testruns:
# now draw the actual timeline blocks
for dir in phases:
# draw suspend and resume blocks separately
bname = '%s%d' % (dir[0], data.testnumber)
if dir == 'suspend':
m0 = data.start
mMax = data.tSuspended
left = '%f' % (((m0-t0)*100.0)/tTotal)
else:
m0 = data.tSuspended
mMax = data.end
# in an x2 run, remove any gap between blocks
if len(testruns) > 1 and data.testnumber == 0:
mMax = testruns[1].start
left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
mTotal = mMax - m0
# if a timeline block is 0 length, skip altogether
if mTotal == 0:
continue
width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
for b in sorted(phases[dir]):
# draw the phase color background
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
width = '%f' % ((length*100.0)/mTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
data.dmesg[b]['color'], '')
for e in data.errorinfo[dir]:
# draw red lines for any kernel errors found
t, err = e
right = '%f' % (((mMax-t)*100.0)/mTotal)
devtl.html += html_error.format(right, err)
for b in sorted(phases[dir]):
# draw the devices for this phase
phaselist = data.dmesg[b]['list']
for d in data.tdevlist[b]:
name = d
drv = ''
dev = phaselist[d]
xtraclass = ''
xtrainfo = ''
xtrastyle = ''
if 'htmlclass' in dev:
xtraclass = dev['htmlclass']
if 'color' in dev:
xtrastyle = 'background:%s;' % dev['color']
if(d in sysvals.devprops):
name = sysvals.devprops[d].altName(d)
xtraclass = sysvals.devprops[d].xtraClass()
xtrainfo = sysvals.devprops[d].xtraInfo()
elif xtraclass == ' kth':
xtrainfo = ' kernel_thread'
if('drv' in dev and dev['drv']):
drv = ' {%s}' % dev['drv']
rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
top = '%.3f' % (rowtop + devtl.scaleH)
left = '%f' % (((dev['start']-m0)*100)/mTotal)
width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
title = name+drv+xtrainfo+length
if sysvals.suspendmode == 'command':
title += sysvals.testcommand
elif xtraclass == ' ps':
if 'suspend' in b:
title += 'pre_suspend_process'
else:
title += 'post_resume_process'
else:
title += b
devtl.html += devtl.html_device.format(dev['id'], \
title, left, top, '%.3f'%rowheight, width, \
d+drv, xtraclass, xtrastyle)
if('cpuexec' in dev):
for t in sorted(dev['cpuexec']):
start, end = t
j = float(dev['cpuexec'][t]) / 5
if j > 1.0:
j = 1.0
height = '%.3f' % (rowheight/3)
top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
left = '%f' % (((start-m0)*100)/mTotal)
width = '%f' % ((end-start)*100/mTotal)
color = 'rgba(255, 0, 0, %f)' % j
devtl.html += \
html_cpuexec.format(left, top, height, width, color)
if('src' not in dev):
continue
# draw any trace events for this device
for e in dev['src']:
height = '%.3f' % devtl.rowH
top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
left = '%f' % (((e.time-m0)*100)/mTotal)
width = '%f' % (e.length*100/mTotal)
xtrastyle = ''
if e.color:
xtrastyle = 'background:%s;' % e.color
devtl.html += \
html_traceevent.format(e.title(), \
left, top, height, width, e.text(), '', xtrastyle)
# draw the time scale and try to keep the number of labels readable
devtl.createTimeScale(m0, mMax, tTotal, dir)
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
if sysvals.suspendmode != 'command':
data = testruns[-1]
devtl.html += '<div class="legend">\n'
pdelta = 100.0/len(data.phases)
pmargin = pdelta / 4.0
for phase in data.phases:
tmp = phase.split('_')
id = tmp[0][0]
if(len(tmp) > 1):
id += tmp[1][0]
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
name = phase.replace('_', ' ')
devtl.html += html_legend.format(order, \
data.dmesg[phase]['color'], name, id)
devtl.html += '</div>\n'
hf = open(sysvals.htmlfile, 'w')
# no header or css if it's embedded
if(sysvals.embedded):
hf.write('pass True tSus %.3f tRes %.3f tLow %.3f fwvalid %s tSus %.3f tRes %.3f\n' %
(data.tSuspended-data.start, data.end-data.tSuspended, data.tLow, data.fwValid, \
data.fwSuspend/1000000, data.fwResume/1000000))
else:
addCSS(hf, sysvals, len(testruns), kerror)
# write the device timeline
hf.write(devtl.html)
hf.write('<div id="devicedetailtitle"></div>\n')
hf.write('<div id="devicedetail" style="display:none;">\n')
# draw the colored boxes for the device detail section
for data in testruns:
hf.write('<div id="devicedetail%d">\n' % data.testnumber)
pscolor = 'linear-gradient(to top left, #ccc, #eee)'
hf.write(devtl.html_phaselet.format('pre_suspend_process', \
'0', '0', pscolor))
for b in data.phases:
phase = data.dmesg[b]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
hf.write(devtl.html_phaselet.format(b, left, width, \
data.dmesg[b]['color']))
hf.write(devtl.html_phaselet.format('post_resume_process', \
'0', '0', pscolor))
if sysvals.suspendmode == 'command':
hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
hf.write('</div>\n')
hf.write('</div>\n')
# write the ftrace data (callgraph)
if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
data = testruns[sysvals.cgtest]
else:
data = testruns[-1]
if(sysvals.usecallgraph and not sysvals.embedded):
addCallgraphs(sysvals, hf, data)
# add the test log as a hidden div
if sysvals.logmsg:
hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
# add the dmesg log as a hidden div
if sysvals.addlogs and sysvals.dmesgfile:
hf.write('<div id="dmesglog" style="display:none;">\n')
lf = open(sysvals.dmesgfile, 'r')
for line in lf:
line = line.replace('<', '&lt;').replace('>', '&gt;')
hf.write(line)
lf.close()
hf.write('</div>\n')
# add the ftrace log as a hidden div
if sysvals.addlogs and sysvals.ftracefile:
hf.write('<div id="ftracelog" style="display:none;">\n')
lf = open(sysvals.ftracefile, 'r')
for line in lf:
hf.write(line)
lf.close()
hf.write('</div>\n')
if(not sysvals.embedded):
# write the footer and close
addScriptCode(hf, testruns)
hf.write('</body>\n</html>\n')
else:
# embedded output will be loaded in a page, so skip the js
t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000
tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000
# add js code in a div entry for later evaluation
detail = 'var bounds = [%f,%f];\n' % (t0, tMax)
detail += 'var devtable = [\n'
for data in testruns:
topo = data.deviceTopology()
detail += '\t"%s",\n' % (topo)
detail += '];\n'
hf.write('<div id=customcode style=display:none>\n'+detail+'</div>\n')
hf.close()
return True
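# Function: addCSS
# Description:
# Write the html document header (head section and css styling) to the
# output file; called from createHTML for non-embedded output. The extra
# argument allows callers to append additional css rules.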
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
kernel = sv.stamp['kernel']
host = sv.hostname[0].upper()+sv.hostname[1:]
mode = sv.suspendmode
if sv.suspendmode in suspendmodename:
mode = suspendmodename[sv.suspendmode]
title = host+' '+mode+' '+kernel
# various format changes by flags
cgchk = 'checked'
cgnchk = 'not(:checked)'
if sv.cgexp:
cgchk = 'not(:checked)'
cgnchk = 'checked'
hoverZ = 'z-index:8;'
if sv.usedevsrc:
hoverZ = ''
devlistpos = 'absolute'
if testcount > 1:
devlistpos = 'relative'
scaleTH = 20
if kerror:
scaleTH = 60
# write the html header first (html head, css code, up to body start)
html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
<title>'+title+'</title>\n\
<style type=\'text/css\'>\n\
body {overflow-y:scroll;}\n\
.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
.callgraph article * {padding-left:28px;}\n\
h1 {color:black;font:bold 30px Times;}\n\
t0 {color:black;font:bold 30px Times;}\n\
t1 {color:black;font:30px Times;}\n\
t2 {color:black;font:25px Times;}\n\
t3 {color:black;font:20px Times;white-space:nowrap;}\n\
t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
cS {font:bold 13px Times;}\n\
table {width:100%;}\n\
.gray {background:rgba(80,80,80,0.1);}\n\
.green {background:rgba(204,255,204,0.4);}\n\
.purple {background:rgba(128,0,128,0.2);}\n\
.yellow {background:rgba(255,255,204,0.4);}\n\
.blue {background:rgba(169,208,245,0.4);}\n\
.time1 {font:22px Arial;border:1px solid;}\n\
.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
td {text-align:center;}\n\
r {color:#500000;font:15px Tahoma;}\n\
n {color:#505050;font:15px Tahoma;}\n\
.tdhl {color:red;}\n\
.hide {display:none;}\n\
.pf {display:none;}\n\
.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
.hover.sync {background:white;}\n\
.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
.logbtn {position:relative;float:right;height:25px;width:50px;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
.devlist {position:'+devlistpos+';width:190px;}\n\
a:link {color:white;text-decoration:none;}\n\
a:visited {color:white;}\n\
a:hover {color:white;}\n\
a:active {color:white;}\n\
.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
.tblock {position:absolute;height:100%;background:#ddd;}\n\
.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
.bg {z-index:1;}\n\
'+extra+'\
</style>\n</head>\n<body>\n'
hf.write(html_header)
# Function: addScriptCode
# Description:
# Adds the javascript code to the output html
# Arguments:
# hf: the open html file pointer
# testruns: array of Data objects from parseKernelLog or parseTraceLog
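# Note: called from createHTML after the timeline html has been written
# (non-embedded output only)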
def addScriptCode(hf, testruns):
t0 = testruns[0].start * 1000
tMax = testruns[-1].end * 1000
# create an array in javascript memory with the device details
detail = ' var devtable = [];\n'
for data in testruns:
topo = data.deviceTopology()
detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo)
detail += ' var bounds = [%f,%f];\n' % (t0, tMax)
# add the code which will manipulate the data in the browser
script_code = \
'<script type="text/javascript">\n'+detail+\
' var resolution = -1;\n'\
' var dragval = [0, 0];\n'\
' function redrawTimescale(t0, tMax, tS) {\n'\
' var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
' var tTotal = tMax - t0;\n'\
' var list = document.getElementsByClassName("tblock");\n'\
' for (var i = 0; i < list.length; i++) {\n'\
' var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
' var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
' var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
' var mMax = m0 + mTotal;\n'\
' var html = "";\n'\
' var divTotal = Math.floor(mTotal/tS) + 1;\n'\
' if(divTotal > 1000) continue;\n'\
' var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
' var pos = 0.0, val = 0.0;\n'\
' for (var j = 0; j < divTotal; j++) {\n'\
' var htmlline = "";\n'\
' var mode = list[i].id[5];\n'\
' if(mode == "s") {\n'\
' pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
' val = (j-divTotal+1)*tS;\n'\
' if(j == divTotal - 1)\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
' else\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' } else {\n'\
' pos = 100 - (((j)*tS*100)/mTotal);\n'\
' val = (j)*tS;\n'\
' htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
' if(j == 0)\n'\
' if(mode == "r")\n'\
' htmlline = rline+"<cS>←R</cS></div>";\n'\
' else\n'\
' htmlline = rline+"<cS>0ms</div>";\n'\
' }\n'\
' html += htmlline;\n'\
' }\n'\
' timescale.innerHTML = html;\n'\
' }\n'\
' }\n'\
' function zoomTimeline() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' var left = zoombox.scrollLeft;\n'\
' var val = parseFloat(dmesg.style.width);\n'\
' var newval = 100;\n'\
' var sh = window.outerWidth / 2;\n'\
' if(this.id == "zoomin") {\n'\
' newval = val * 1.2;\n'\
' if(newval > 910034) newval = 910034;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else if (this.id == "zoomout") {\n'\
' newval = val / 1.2;\n'\
' if(newval < 100) newval = 100;\n'\
' dmesg.style.width = newval+"%";\n'\
' zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
' } else {\n'\
' zoombox.scrollLeft = 0;\n'\
' dmesg.style.width = "100%";\n'\
' }\n'\
' var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
' var t0 = bounds[0];\n'\
' var tMax = bounds[1];\n'\
' var tTotal = tMax - t0;\n'\
' var wTotal = tTotal * 100.0 / newval;\n'\
' var idx = 7*window.innerWidth/1100;\n'\
' for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
' if(i >= tS.length) i = tS.length - 1;\n'\
' if(tS[i] == resolution) return;\n'\
' resolution = tS[i];\n'\
' redrawTimescale(t0, tMax, tS[i]);\n'\
' }\n'\
' function deviceName(title) {\n'\
' var name = title.slice(0, title.indexOf(" ("));\n'\
' return name;\n'\
' }\n'\
' function deviceHover() {\n'\
' var name = deviceName(this.title);\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' dev[i].className = "hover "+cname;\n'\
' } else {\n'\
' dev[i].className = cname;\n'\
' }\n'\
' }\n'\
' }\n'\
' function deviceUnhover() {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
' }\n'\
' }\n'\
' function deviceTitle(title, total, cpu) {\n'\
' var prefix = "Total";\n'\
' if(total.length > 3) {\n'\
' prefix = "Average";\n'\
' total[1] = (total[1]+total[3])/2;\n'\
' total[2] = (total[2]+total[4])/2;\n'\
' }\n'\
' var devtitle = document.getElementById("devicedetailtitle");\n'\
' var name = deviceName(title);\n'\
' if(cpu >= 0) name = "CPU"+cpu;\n'\
' var driver = "";\n'\
' var tS = "<t2>(</t2>";\n'\
' var tR = "<t2>)</t2>";\n'\
' if(total[1] > 0)\n'\
' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
' if(total[2] > 0)\n'\
' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
' var s = title.indexOf("{");\n'\
' var e = title.indexOf("}");\n'\
' if((s >= 0) && (e >= 0))\n'\
' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
' if(total[1] > 0 && total[2] > 0)\n'\
' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
' else\n'\
' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
' return name;\n'\
' }\n'\
' function deviceDetail() {\n'\
' var devinfo = document.getElementById("devicedetail");\n'\
' devinfo.style.display = "block";\n'\
' var name = deviceName(this.title);\n'\
' var cpu = -1;\n'\
' if(name.match("CPU_ON\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(7));\n'\
' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
' cpu = parseInt(name.slice(8));\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' var idlist = [];\n'\
' var pdata = [[]];\n'\
' if(document.getElementById("devicedetail1"))\n'\
' pdata = [[], []];\n'\
' var pd = pdata[0];\n'\
' var total = [0.0, 0.0, 0.0];\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dname = deviceName(dev[i].title);\n'\
' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
' (name == dname))\n'\
' {\n'\
' idlist[idlist.length] = dev[i].id;\n'\
' var tidx = 1;\n'\
' if(dev[i].id[0] == "a") {\n'\
' pd = pdata[0];\n'\
' } else {\n'\
' if(pdata.length == 1) pdata[1] = [];\n'\
' if(total.length == 3) total[3]=total[4]=0.0;\n'\
' pd = pdata[1];\n'\
' tidx = 3;\n'\
' }\n'\
' var info = dev[i].title.split(" ");\n'\
' var pname = info[info.length-1];\n'\
' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
' total[0] += pd[pname];\n'\
' if(pname.indexOf("suspend") >= 0)\n'\
' total[tidx] += pd[pname];\n'\
' else\n'\
' total[tidx+1] += pd[pname];\n'\
' }\n'\
' }\n'\
' var devname = deviceTitle(this.title, total, cpu);\n'\
' var left = 0.0;\n'\
' for (var t = 0; t < pdata.length; t++) {\n'\
' pd = pdata[t];\n'\
' devinfo = document.getElementById("devicedetail"+t);\n'\
' var phases = devinfo.getElementsByClassName("phaselet");\n'\
' for (var i = 0; i < phases.length; i++) {\n'\
' if(phases[i].id in pd) {\n'\
' var w = 100.0*pd[phases[i].id]/total[0];\n'\
' var fs = 32;\n'\
' if(w < 8) fs = 4*w | 0;\n'\
' var fs2 = fs*3/4;\n'\
' phases[i].style.width = w+"%";\n'\
' phases[i].style.left = left+"%";\n'\
' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
' left += w;\n'\
' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
' phases[i].innerHTML = time+pname;\n'\
' } else {\n'\
' phases[i].style.width = "0%";\n'\
' phases[i].style.left = left+"%";\n'\
' }\n'\
' }\n'\
' }\n'\
' if(typeof devstats !== \'undefined\')\n'\
' callDetail(this.id, this.title);\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' if(cg.length < 10) return;\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' cgid = cg[i].id.split("x")[0]\n'\
' if(idlist.indexOf(cgid) >= 0) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function callDetail(devid, devtitle) {\n'\
' if(!(devid in devstats) || devstats[devid].length < 1)\n'\
' return;\n'\
' var list = devstats[devid];\n'\
' var tmp = devtitle.split(" ");\n'\
' var name = tmp[0], phase = tmp[tmp.length-1];\n'\
' var dd = document.getElementById(phase);\n'\
' var total = parseFloat(tmp[1].slice(1));\n'\
' var mlist = [];\n'\
' var maxlen = 0;\n'\
' var info = []\n'\
' for(var i in list) {\n'\
' if(list[i][0] == "@") {\n'\
' info = list[i].split("|");\n'\
' continue;\n'\
' }\n'\
' var tmp = list[i].split("|");\n'\
' var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
' var p = (t*100.0/total).toFixed(2);\n'\
' mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
' if(f.length > maxlen)\n'\
' maxlen = f.length;\n'\
' }\n'\
' var pad = 5;\n'\
' if(mlist.length == 0) pad = 30;\n'\
' var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
' if(info.length > 2)\n'\
' html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
' if(info.length > 3)\n'\
' html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
' if(info.length > 4)\n'\
' html += ", return=<b>"+info[4]+"</b>";\n'\
' html += "</t3></div>";\n'\
' if(mlist.length > 0) {\n'\
' html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
' for(var i in mlist)\n'\
' html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
' html += "</tr><tr><th>Calls</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][1]+"</td>";\n'\
' html += "</tr><tr><th>Time(ms)</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][2]+"</td>";\n'\
' html += "</tr><tr><th>Percent</th>";\n'\
' for(var i in mlist)\n'\
' html += "<td>"+mlist[i][3]+"</td>";\n'\
' html += "</tr></table>";\n'\
' }\n'\
' dd.innerHTML = html;\n'\
' var height = (maxlen*5)+100;\n'\
' dd.style.height = height+"px";\n'\
' document.getElementById("devicedetail").style.height = height+"px";\n'\
' }\n'\
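	'	// Illustrative sketch of the devstats entry layout that callDetail()\n'\
	'	// above assumes (hypothetical values): each devstats[devid] array holds\n'\
	'	//   "@|start|end|length(w/o overhead)|return", e.g. "@|0.428|1.631|1.203|0"\n'\
	'	//   "time(ms)|function|call count",            e.g. "0.876|msleep|2"\n'\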
' function callSelect() {\n'\
' var cglist = document.getElementById("callgraphs");\n'\
' if(!cglist) return;\n'\
' var cg = cglist.getElementsByClassName("atop");\n'\
' for (var i = 0; i < cg.length; i++) {\n'\
' if(this.id == cg[i].id) {\n'\
' cg[i].style.display = "block";\n'\
' } else {\n'\
' cg[i].style.display = "none";\n'\
' }\n'\
' }\n'\
' }\n'\
' function devListWindow(e) {\n'\
' var win = window.open();\n'\
' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
' "<style type=\\"text/css\\">"+\n'\
' " ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
' "</style>"\n'\
' var dt = devtable[0];\n'\
' if(e.target.id != "devlist1")\n'\
' dt = devtable[1];\n'\
' win.document.write(html+dt);\n'\
' }\n'\
' function errWindow() {\n'\
' var text = this.id;\n'\
' var win = window.open();\n'\
' win.document.write("<pre>"+text+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function logWindow(e) {\n'\
' var name = e.target.id.slice(4);\n'\
' var win = window.open();\n'\
' var log = document.getElementById(name+"log");\n'\
' var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
' win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
' win.document.close();\n'\
' }\n'\
' function onClickPhase(e) {\n'\
' }\n'\
' function onMouseDown(e) {\n'\
' dragval[0] = e.clientX;\n'\
' dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
' document.onmousemove = onMouseMove;\n'\
' }\n'\
' function onMouseMove(e) {\n'\
' var zoombox = document.getElementById("dmesgzoombox");\n'\
' zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
' }\n'\
' function onMouseUp(e) {\n'\
' document.onmousemove = null;\n'\
' }\n'\
' function onKeyPress(e) {\n'\
' var c = e.charCode;\n'\
' if(c != 42 && c != 43 && c != 45) return;\n'\
' var click = document.createEvent("Events");\n'\
' click.initEvent("click", true, false);\n'\
' if(c == 43) \n'\
' document.getElementById("zoomin").dispatchEvent(click);\n'\
' else if(c == 45)\n'\
' document.getElementById("zoomout").dispatchEvent(click);\n'\
' else if(c == 42)\n'\
' document.getElementById("zoomdef").dispatchEvent(click);\n'\
' }\n'\
' window.addEventListener("resize", function () {zoomTimeline();});\n'\
' window.addEventListener("load", function () {\n'\
' var dmesg = document.getElementById("dmesg");\n'\
' dmesg.style.width = "100%"\n'\
' dmesg.onmousedown = onMouseDown;\n'\
' document.onmouseup = onMouseUp;\n'\
' document.onkeypress = onKeyPress;\n'\
' document.getElementById("zoomin").onclick = zoomTimeline;\n'\
' document.getElementById("zoomout").onclick = zoomTimeline;\n'\
' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
' var list = document.getElementsByClassName("square");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = onClickPhase;\n'\
' var list = document.getElementsByClassName("err");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = errWindow;\n'\
' var list = document.getElementsByClassName("logbtn");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = logWindow;\n'\
' list = document.getElementsByClassName("devlist");\n'\
' for (var i = 0; i < list.length; i++)\n'\
' list[i].onclick = devListWindow;\n'\
' var dev = dmesg.getElementsByClassName("thread");\n'\
' for (var i = 0; i < dev.length; i++) {\n'\
' dev[i].onclick = deviceDetail;\n'\
' dev[i].onmouseover = deviceHover;\n'\
' dev[i].onmouseout = deviceUnhover;\n'\
' }\n'\
' var dev = dmesg.getElementsByClassName("srccall");\n'\
' for (var i = 0; i < dev.length; i++)\n'\
' dev[i].onclick = callSelect;\n'\
' zoomTimeline();\n'\
' });\n'\
'</script>\n'
hf.write(script_code);
# Function: executeSuspend
# Description:
# Execute system suspend through the sysfs interface, then copy the output
# dmesg and ftrace files to the test output directory.
def executeSuspend():
pm = ProcessMonitor()
tp = sysvals.tpath
fwdata = []
# mark the start point in the kernel ring buffer just as we start
sysvals.initdmesg()
# start ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
print('START TRACING')
sysvals.fsetVal('1', 'tracing_on')
if sysvals.useprocmon:
pm.start()
# execute however many s/r runs requested
for count in range(1,sysvals.execcount+1):
# x2delay in between test runs
if(count > 1 and sysvals.x2delay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.x2delay, 'trace_marker')
time.sleep(sysvals.x2delay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# start message
if sysvals.testcommand != '':
print('COMMAND START')
else:
if(sysvals.rtcwake):
print('SUSPEND START')
else:
print('SUSPEND START (press a key to resume)')
# set rtcwake
if(sysvals.rtcwake):
print('will issue an rtcwake in %d seconds' % sysvals.rtcwaketime)
sysvals.rtcWakeAlarmOn()
# start of suspend trace marker
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('SUSPEND START', 'trace_marker')
		# optional delay before the first suspend
if(count == 1 and sysvals.predelay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.predelay, 'trace_marker')
time.sleep(sysvals.predelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# initiate suspend or command
if sysvals.testcommand != '':
			call(sysvals.testcommand+' 2>&1', shell=True)
else:
pf = open(sysvals.powerfile, 'w')
pf.write(sysvals.suspendmode)
# execution will pause here
try:
pf.close()
except:
pass
if(sysvals.rtcwake):
sysvals.rtcWakeAlarmOff()
		# optional delay after the last resume
if(count == sysvals.execcount and sysvals.postdelay > 0):
sysvals.fsetVal('WAIT %d' % sysvals.postdelay, 'trace_marker')
time.sleep(sysvals.postdelay/1000.0)
sysvals.fsetVal('WAIT END', 'trace_marker')
# return from suspend
print('RESUME COMPLETE')
if(sysvals.usecallgraph or sysvals.usetraceevents):
sysvals.fsetVal('RESUME COMPLETE', 'trace_marker')
if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
fwdata.append(getFPDT(False))
# stop ftrace
if(sysvals.usecallgraph or sysvals.usetraceevents):
if sysvals.useprocmon:
pm.stop()
sysvals.fsetVal('0', 'tracing_on')
print('CAPTURING TRACE')
writeDatafileHeader(sysvals.ftracefile, fwdata)
call('cat '+tp+'trace >> '+sysvals.ftracefile, shell=True)
sysvals.fsetVal('', 'trace')
devProps()
# grab a copy of the dmesg output
print('CAPTURING DMESG')
writeDatafileHeader(sysvals.dmesgfile, fwdata)
sysvals.getdmesg()
def writeDatafileHeader(filename, fwdata):
fp = open(filename, 'a')
fp.write(sysvals.teststamp+'\n')
if(sysvals.suspendmode == 'mem' or sysvals.suspendmode == 'command'):
for fw in fwdata:
if(fw):
fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
fp.close()
# Function: setUSBDevicesAuto
# Description:
# Set the autosuspend control parameter of all USB devices to auto
#	 This can be dangerous, so use at your own risk: most devices are set
#	 to always-on since the kernel can't determine if the device can
#	 properly autosuspend
def setUSBDevicesAuto():
rootCheck(True)
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
call('echo auto > %s/power/control' % dirname, shell=True)
name = dirname.split('/')[-1]
desc = Popen(['cat', '%s/product' % dirname],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
ctrl = Popen(['cat', '%s/power/control' % dirname],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
print('control is %s for %6s: %s' % (ctrl, name, desc))
# Function: yesno
# Description:
# Print out an equivalent Y or N for a set of known parameter values
# Output:
# 'Y', 'N', or ' ' if the value is unknown
def yesno(val):
yesvals = ['auto', 'enabled', 'active', '1']
novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported']
if val in yesvals:
return 'Y'
elif val in novals:
return 'N'
return ' '
# Function: ms2nice
# Description:
# Print out a very concise time string in minutes and seconds
# Output:
# The time string, e.g. "1901m16s"
def ms2nice(val):
ms = 0
try:
ms = int(val)
except:
return 0.0
m = ms / 60000
s = (ms / 1000) - (m * 60)
return '%3dm%2ds' % (m, s)
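# Illustrative sketch of the two helpers above, as used by detectUSB() below
# (hypothetical sysfs values; the integer division follows this script's
# Python 2 semantics):
def _example_usb_columns():
	flags = [yesno(v) for v in ('enabled', 'auto', 'on', '1')]  # ['Y', 'Y', 'N', 'Y']
	uptime = ms2nice('115000')                                  # '  1m55s'
	return flags, uptime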
# Function: detectUSB
# Description:
# Detect all the USB hosts and devices currently connected and add
# a list of USB device names to sysvals for better timeline readability
def detectUSB():
field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''}
power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'',
'control':'', 'persist':'', 'runtime_enabled':'',
'runtime_status':'', 'runtime_usage':'',
'runtime_active_time':'',
'runtime_suspended_time':'',
'active_duration':'',
'connected_duration':''}
print('LEGEND')
print('---------------------------------------------------------------------------------------------')
print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)')
print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)')
print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)')
print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)')
print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)')
print(' U = runtime usage count')
print('---------------------------------------------------------------------------------------------')
print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT')
print('---------------------------------------------------------------------------------------------')
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/usb[0-9]*.*', dirname) and
'idVendor' in filenames and 'idProduct' in filenames):
for i in field:
field[i] = Popen(['cat', '%s/%s' % (dirname, i)],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
name = dirname.split('/')[-1]
for i in power:
power[i] = Popen(['cat', '%s/power/%s' % (dirname, i)],
stderr=PIPE, stdout=PIPE).stdout.read().replace('\n', '')
if(re.match('usb[0-9]*', name)):
first = '%-8s' % name
else:
first = '%8s' % name
print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \
(first, field['idVendor'], field['idProduct'], \
field['product'][0:20], field['speed'], \
yesno(power['async']), \
yesno(power['control']), \
yesno(power['persist']), \
yesno(power['runtime_enabled']), \
yesno(power['runtime_status']), \
power['runtime_usage'], \
power['autosuspend'], \
ms2nice(power['runtime_active_time']), \
ms2nice(power['runtime_suspended_time']), \
ms2nice(power['active_duration']), \
ms2nice(power['connected_duration'])))
# Function: devProps
# Description:
# Retrieve a list of properties for all devices in the trace log
def devProps(data=0):
props = dict()
if data:
idx = data.index(': ') + 2
if idx >= len(data):
return
devlist = data[idx:].split(';')
for dev in devlist:
f = dev.split(',')
if len(f) < 3:
continue
dev = f[0]
props[dev] = DevProps()
props[dev].altname = f[1]
if int(f[2]):
props[dev].async = True
else:
props[dev].async = False
sysvals.devprops = props
if sysvals.suspendmode == 'command' and 'testcommandstring' in props:
sysvals.testcommand = props['testcommandstring'].altname
return
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
# first get the list of devices we need properties for
msghead = 'Additional data added by AnalyzeSuspend'
alreadystamped = False
tp = TestProps()
tf = open(sysvals.ftracefile, 'r')
for line in tf:
if msghead in line:
alreadystamped = True
continue
# determine the trace data type (required for further parsing)
m = re.match(sysvals.tracertypefmt, line)
if(m):
tp.setTracerType(m.group('t'))
continue
# parse only valid lines, if this is not one move on
m = re.match(tp.ftrace_line_fmt, line)
if(not m or 'device_pm_callback_start' not in line):
continue
		m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'))
if(not m):
continue
dev = m.group('d')
if dev not in props:
props[dev] = DevProps()
tf.close()
if not alreadystamped and sysvals.suspendmode == 'command':
out = '#\n# '+msghead+'\n# Device Properties: '
out += 'testcommandstring,%s,0;' % (sysvals.testcommand)
with open(sysvals.ftracefile, 'a') as fp:
fp.write(out+'\n')
sysvals.devprops = props
return
# now get the syspath for each of our target devices
for dirname, dirnames, filenames in os.walk('/sys/devices'):
if(re.match('.*/power', dirname) and 'async' in filenames):
dev = dirname.split('/')[-2]
if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
props[dev].syspath = dirname[:-6]
# now fill in the properties for our target devices
for dev in props:
dirname = props[dev].syspath
if not dirname or not os.path.exists(dirname):
continue
with open(dirname+'/power/async') as fp:
text = fp.read()
props[dev].async = False
if 'enabled' in text:
props[dev].async = True
fields = os.listdir(dirname)
if 'product' in fields:
with open(dirname+'/product') as fp:
props[dev].altname = fp.read()
elif 'name' in fields:
with open(dirname+'/name') as fp:
props[dev].altname = fp.read()
elif 'model' in fields:
with open(dirname+'/model') as fp:
props[dev].altname = fp.read()
elif 'description' in fields:
with open(dirname+'/description') as fp:
props[dev].altname = fp.read()
elif 'id' in fields:
with open(dirname+'/id') as fp:
props[dev].altname = fp.read()
elif 'idVendor' in fields and 'idProduct' in fields:
idv, idp = '', ''
with open(dirname+'/idVendor') as fp:
idv = fp.read().strip()
with open(dirname+'/idProduct') as fp:
idp = fp.read().strip()
props[dev].altname = '%s:%s' % (idv, idp)
if props[dev].altname:
out = props[dev].altname.strip().replace('\n', ' ')
out = out.replace(',', ' ')
out = out.replace(';', ' ')
props[dev].altname = out
# and now write the data to the ftrace file
if not alreadystamped:
out = '#\n# '+msghead+'\n# Device Properties: '
for dev in sorted(props):
out += props[dev].out(dev)
with open(sysvals.ftracefile, 'a') as fp:
fp.write(out+'\n')
sysvals.devprops = props
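# Illustrative sketch (not used by the tool): the "Device Properties" line
# written above is a semicolon-separated list of entries; judging from the
# command-mode branch earlier in devProps(), DevProps.out() yields entries of
# the form "name,altname,async_flag;". Device names below are hypothetical.
def _example_device_properties_line():
	entries = [
		('0000:00:02.0', 'Intel HD Graphics', 1),
		('usb1', '1d6b:0002', 0),
	]
	line = '# Device Properties: '
	for name, altname, isasync in entries:
		line += '%s,%s,%d;' % (name, altname, isasync)
	return line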
# Function: getModes
# Description:
# Determine the supported power modes on this system
# Output:
# A string list of the available modes
def getModes():
modes = ''
if(os.path.exists(sysvals.powerfile)):
fp = open(sysvals.powerfile, 'r')
modes = string.split(fp.read())
fp.close()
return modes
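# Illustrative sketch: /sys/power/state normally holds a single line such as
# "freeze mem disk" (the exact set is kernel dependent), so getModes() would
# return ['freeze', 'mem', 'disk'].
def _example_modes():
	return string.split('freeze mem disk')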
# Function: getFPDT
# Description:
#	 Read the ACPI BIOS tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
rectype = {}
rectype[0] = 'Firmware Basic Boot Performance Record'
rectype[1] = 'S3 Performance Table Record'
prectype = {}
prectype[0] = 'Basic S3 Resume Performance Record'
prectype[1] = 'Basic S3 Suspend Performance Record'
rootCheck(True)
if(not os.path.exists(sysvals.fpdtpath)):
if(output):
doError('file does not exist: %s' % sysvals.fpdtpath)
return False
if(not os.access(sysvals.fpdtpath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.fpdtpath)
return False
if(not os.path.exists(sysvals.mempath)):
if(output):
doError('file does not exist: %s' % sysvals.mempath)
return False
if(not os.access(sysvals.mempath, os.R_OK)):
if(output):
doError('file is not readable: %s' % sysvals.mempath)
return False
fp = open(sysvals.fpdtpath, 'rb')
buf = fp.read()
fp.close()
if(len(buf) < 36):
if(output):
doError('Invalid FPDT table data, should '+\
'be at least 36 bytes')
return False
table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
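	# the 36-byte unpack above follows the standard ACPI table header layout:
	#   4s Signature, I Length, B Revision, B Checksum, 6s OEM ID,
	#   8s OEM Table ID, I OEM Revision, 4s Creator ID, I Creator Revision
	# (no padding: 4+4+1+1+6+8+4+4+4 = 36 bytes, matching the prints below)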
if(output):
print('')
print('Firmware Performance Data Table (%s)' % table[0])
print(' Signature : %s' % table[0])
print(' Table Length : %u' % table[1])
print(' Revision : %u' % table[2])
print(' Checksum : 0x%x' % table[3])
print(' OEM ID : %s' % table[4])
print(' OEM Table ID : %s' % table[5])
print(' OEM Revision : %u' % table[6])
print(' Creator ID : %s' % table[7])
print(' Creator Revision : 0x%x' % table[8])
print('')
if(table[0] != 'FPDT'):
if(output):
doError('Invalid FPDT table')
return False
if(len(buf) <= 36):
return False
i = 0
fwData = [0, 0]
records = buf[36:]
fp = open(sysvals.mempath, 'rb')
while(i < len(records)):
header = struct.unpack('HBB', records[i:i+4])
if(header[0] not in rectype):
i += header[1]
continue
if(header[1] != 16):
i += header[1]
continue
addr = struct.unpack('Q', records[i+8:i+16])[0]
try:
fp.seek(addr)
first = fp.read(8)
except:
if(output):
print('Bad address 0x%x in %s' % (addr, sysvals.mempath))
return [0, 0]
rechead = struct.unpack('4sI', first)
recdata = fp.read(rechead[1]-8)
if(rechead[0] == 'FBPT'):
record = struct.unpack('HBBIQQQQQ', recdata)
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
print(' Reset END : %u ns' % record[4])
print(' OS Loader LoadImage Start : %u ns' % record[5])
print(' OS Loader StartImage Start : %u ns' % record[6])
print(' ExitBootServices Entry : %u ns' % record[7])
print(' ExitBootServices Exit : %u ns' % record[8])
elif(rechead[0] == 'S3PT'):
if(output):
print('%s (%s)' % (rectype[header[0]], rechead[0]))
j = 0
while(j < len(recdata)):
prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype):
					# skip unknown records by their length to avoid an endless loop
					j += max(prechead[1], 4)
					continue
if(prechead[0] == 0):
record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
fwData[1] = record[2]
if(output):
print(' %s' % prectype[prechead[0]])
print(' Resume Count : %u' % \
record[1])
print(' FullResume : %u ns' % \
record[2])
print(' AverageResume : %u ns' % \
record[3])
elif(prechead[0] == 1):
record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
fwData[0] = record[1] - record[0]
if(output):
print(' %s' % prectype[prechead[0]])
print(' SuspendStart : %u ns' % \
record[0])
print(' SuspendEnd : %u ns' % \
record[1])
print(' SuspendTime : %u ns' % \
fwData[0])
j += prechead[1]
if(output):
print('')
i += header[1]
fp.close()
return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck(probecheck=False):
status = True
print('Checking this system (%s)...' % platform.node())
# check we have root access
res = sysvals.colorText('NO (No features of this tool will work!)')
if(rootCheck(False)):
res = 'YES'
print(' have root access: %s' % res)
if(res != 'YES'):
print(' Try running this script with sudo')
return False
# check sysfs is mounted
res = sysvals.colorText('NO (No features of this tool will work!)')
if(os.path.exists(sysvals.powerfile)):
res = 'YES'
print(' is sysfs mounted: %s' % res)
if(res != 'YES'):
return False
# check target mode is a valid mode
if sysvals.suspendmode != 'command':
res = sysvals.colorText('NO')
modes = getModes()
if(sysvals.suspendmode in modes):
res = 'YES'
else:
status = False
print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
if(res == 'NO'):
print(' valid power modes are: %s' % modes)
print(' please choose one with -m')
# check if ftrace is available
res = sysvals.colorText('NO')
ftgood = sysvals.verifyFtrace()
if(ftgood):
res = 'YES'
elif(sysvals.usecallgraph):
status = False
print(' is ftrace supported: %s' % res)
# check if kprobes are available
res = sysvals.colorText('NO')
sysvals.usekprobes = sysvals.verifyKprobes()
if(sysvals.usekprobes):
res = 'YES'
else:
sysvals.usedevsrc = False
print(' are kprobes supported: %s' % res)
# what data source are we using
res = 'DMESG'
if(ftgood):
sysvals.usetraceeventsonly = True
sysvals.usetraceevents = False
for e in sysvals.traceevents:
check = False
if(os.path.exists(sysvals.epath+e)):
check = True
if(not check):
sysvals.usetraceeventsonly = False
if(e == 'suspend_resume' and check):
sysvals.usetraceevents = True
if(sysvals.usetraceevents and sysvals.usetraceeventsonly):
res = 'FTRACE (all trace events found)'
elif(sysvals.usetraceevents):
res = 'DMESG and FTRACE (suspend_resume trace event found)'
print(' timeline data source: %s' % res)
# check if rtcwake
res = sysvals.colorText('NO')
if(sysvals.rtcpath != ''):
res = 'YES'
elif(sysvals.rtcwake):
status = False
print(' is rtcwake supported: %s' % res)
if not probecheck:
return status
# verify kprobes
if sysvals.usekprobes:
for name in sysvals.tracefuncs:
sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
if sysvals.usedevsrc:
for name in sysvals.dev_tracefuncs:
sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
sysvals.addKprobes(True)
return status
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
if(help == True):
printHelp()
	print('ERROR: %s\n' % msg)
sys.exit()
# Function: rootCheck
# Description:
# quick check to see if we have root access
def rootCheck(fatal):
if(os.access(sysvals.powerfile, os.W_OK)):
return True
if fatal:
doError('This command requires sysfs mount and root access')
return False
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
if main:
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = int(arg)
except:
doError(name+': non-integer value given', True)
if(val < min or val > max):
doError(name+': value should be between %d and %d' % (min, max), True)
return val
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
if main:
try:
arg = args.next()
except:
doError(name+': no argument supplied', True)
else:
arg = args
try:
val = float(arg)
except:
doError(name+': non-numerical value given', True)
if(val < min or val > max):
doError(name+': value should be between %f and %f' % (min, max), True)
return val
def processData():
print('PROCESSING DATA')
if(sysvals.usetraceeventsonly):
testruns = parseTraceLog()
if sysvals.dmesgfile:
dmesgtext = loadKernelLog(True)
for data in testruns:
data.extractErrorInfo(dmesgtext)
else:
testruns = loadKernelLog()
for data in testruns:
parseKernelLog(data)
if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
appendIncompleteTraceLog(testruns)
createHTML(testruns)
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
if sysvals.ftracefile:
doesTraceLogHaveTraceEvents()
if not sysvals.dmesgfile and not sysvals.usetraceeventsonly:
doError('recreating this html output requires a dmesg file')
sysvals.setOutputFile()
vprint('Output file: %s' % sysvals.htmlfile)
if(os.path.exists(sysvals.htmlfile) and not os.access(sysvals.htmlfile, os.W_OK)):
doError('missing permission to write to %s' % sysvals.htmlfile)
processData()
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(subdir, testpath=''):
# prepare for the test
sysvals.initFtrace()
sysvals.initTestOutput(subdir, testpath)
vprint('Output files:\n\t%s\n\t%s\n\t%s' % \
(sysvals.dmesgfile, sysvals.ftracefile, sysvals.htmlfile))
# execute the test
executeSuspend()
sysvals.cleanupFtrace()
processData()
# if running as root, change output dir owner to sudo_user
if os.path.isdir(sysvals.testdir) and os.getuid() == 0 and \
'SUDO_USER' in os.environ:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
def find_in_html(html, strs, div=False):
for str in strs:
l = len(str)
i = html.find(str)
if i >= 0:
break
if i < 0:
return ''
if not div:
return re.search(r'[-+]?\d*\.\d+|\d+', html[i+l:i+l+50]).group()
n = html[i+l:].find('</div>')
if n < 0:
return ''
return html[i+l:i+l+n]
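# Illustrative usage sketch for find_in_html(); the html fragment below is
# hypothetical, not verbatim output of this tool.
def _example_find_in_html():
	html = 'Kernel Suspend: 1502.345 ms <div class="stamp">myhost 4.17.0 mem 05/06/18 17:03:52</div>'
	t = find_in_html(html, ['Kernel Suspend: '])               # -> '1502.345'
	stamp = find_in_html(html, ['<div class="stamp">'], True)  # -> the stamp text
	return t, stamp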
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True):
inpath = os.path.abspath(subdir)
outpath = inpath
if local:
outpath = os.path.abspath('.')
print('Generating a summary of folder "%s"' % inpath)
testruns = []
for dirname, dirnames, filenames in os.walk(subdir):
for filename in filenames:
if(not re.match('.*.html', filename)):
continue
file = os.path.join(dirname, filename)
html = open(file, 'r').read(10000)
suspend = find_in_html(html,
['Kernel Suspend: ', 'Kernel Suspend Time: '])
resume = find_in_html(html,
['Kernel Resume: ', 'Kernel Resume Time: '])
line = find_in_html(html, ['<div class="stamp">'], True)
stmp = line.split()
if not suspend or not resume or len(stmp) < 4:
continue
data = {
'host': stmp[0],
'kernel': stmp[1],
'mode': stmp[2],
'time': string.join(stmp[3:], ' '),
'suspend': suspend,
'resume': resume,
'url': os.path.relpath(file, outpath),
}
if len(stmp) == 7:
data['kernel'] = 'unknown'
data['mode'] = stmp[1]
data['time'] = string.join(stmp[2:], ' ')
testruns.append(data)
outfile = os.path.join(outpath, 'summary.html')
print('Summary file: %s' % outfile)
createHTMLSummarySimple(testruns, outfile, inpath)
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(value):
yes = ['1', 'true', 'yes', 'on']
if value.lower() in yes:
return True
return False
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
Config = ConfigParser.ConfigParser()
Config.read(file)
sections = Config.sections()
overridekprobes = False
overridedevkprobes = False
if 'Settings' in sections:
for opt in Config.options('Settings'):
value = Config.get('Settings', opt).lower()
if(opt.lower() == 'verbose'):
sysvals.verbose = checkArgBool(value)
elif(opt.lower() == 'addlogs'):
sysvals.addlogs = checkArgBool(value)
elif(opt.lower() == 'dev'):
sysvals.usedevsrc = checkArgBool(value)
elif(opt.lower() == 'proc'):
sysvals.useprocmon = checkArgBool(value)
elif(opt.lower() == 'x2'):
if checkArgBool(value):
sysvals.execcount = 2
elif(opt.lower() == 'callgraph'):
sysvals.usecallgraph = checkArgBool(value)
elif(opt.lower() == 'override-timeline-functions'):
overridekprobes = checkArgBool(value)
elif(opt.lower() == 'override-dev-timeline-functions'):
overridedevkprobes = checkArgBool(value)
elif(opt.lower() == 'devicefilter'):
sysvals.setDeviceFilter(value)
elif(opt.lower() == 'expandcg'):
sysvals.cgexp = checkArgBool(value)
elif(opt.lower() == 'srgap'):
if checkArgBool(value):
sysvals.srgap = 5
elif(opt.lower() == 'mode'):
sysvals.suspendmode = value
elif(opt.lower() == 'command'):
sysvals.testcommand = value
elif(opt.lower() == 'x2delay'):
sysvals.x2delay = getArgInt('-x2delay', value, 0, 60000, False)
elif(opt.lower() == 'predelay'):
sysvals.predelay = getArgInt('-predelay', value, 0, 60000, False)
elif(opt.lower() == 'postdelay'):
sysvals.postdelay = getArgInt('-postdelay', value, 0, 60000, False)
elif(opt.lower() == 'maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', value, 0, 1000, False)
elif(opt.lower() == 'rtcwake'):
if value.lower() == 'off':
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', value, 0, 3600, False)
elif(opt.lower() == 'timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', value, 0, 6, False))
elif(opt.lower() == 'mindev'):
sysvals.mindevlen = getArgFloat('-mindev', value, 0.0, 10000.0, False)
elif(opt.lower() == 'callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', value, 0.0, 1.0, False)
elif(opt.lower() == 'callloop-maxlen'):
				sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', value, 0.0, 1.0, False)
elif(opt.lower() == 'mincg'):
sysvals.mincglen = getArgFloat('-mincg', value, 0.0, 10000.0, False)
elif(opt.lower() == 'output-dir'):
sysvals.setOutputFolder(value)
if sysvals.suspendmode == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"')
# compatibility errors
if sysvals.usedevsrc and sysvals.usecallgraph:
doError('-dev is not compatible with -f')
if sysvals.usecallgraph and sysvals.useprocmon:
doError('-proc is not compatible with -f')
if overridekprobes:
sysvals.tracefuncs = dict()
if overridedevkprobes:
sysvals.dev_tracefuncs = dict()
kprobes = dict()
kprobesec = 'dev_timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
text = Config.get(kprobesec, name)
kprobes[name] = (text, True)
kprobesec = 'timeline_functions_'+platform.machine()
if kprobesec in sections:
for name in Config.options(kprobesec):
if name in kprobes:
doError('Duplicate timeline function found "%s"' % (name))
text = Config.get(kprobesec, name)
kprobes[name] = (text, False)
for name in kprobes:
function = name
format = name
color = ''
args = dict()
text, dev = kprobes[name]
data = text.split()
i = 0
for val in data:
			# bracketed strings are special formatting, read them separately
if val[0] == '[' and val[-1] == ']':
for prop in val[1:-1].split(','):
p = prop.split('=')
if p[0] == 'color':
try:
color = int(p[1], 16)
color = '#'+p[1]
except:
color = p[1]
continue
# first real arg should be the format string
if i == 0:
format = val
# all other args are actual function args
else:
d = val.split('=')
args[d[0]] = d[1]
i += 1
if not function or not format:
doError('Invalid kprobe: %s' % name)
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
doError('Duplicate timeline function found "%s"' % (name))
kp = {
'name': name,
'func': function,
'format': format,
sysvals.archargs: args
}
if color:
kp['color'] = color
if dev:
sysvals.dev_tracefuncs[name] = kp
else:
sysvals.tracefuncs[name] = kp
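# Illustrative sketch of the timeline-function config syntax parsed above
# (hypothetical entry; the register expression is an example only):
#
#   [timeline_functions_x86_64]
#   msleep: msleep({time}) [color=red] time=%di:s32
#
# would produce a kprobe dict roughly like the one returned below (the args
# key is actually sysvals.archargs, e.g. 'args_x86_64' on x86_64):
def _example_kprobe_entry():
	return {
		'name': 'msleep',
		'func': 'msleep',
		'format': 'msleep({time})',
		'args_x86_64': {'time': '%di:s32'},
		'color': 'red',
	}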
# Function: printHelp
# Description:
# print out the help text
def printHelp():
modes = getModes()
print('')
print('%s v%s' % (sysvals.title, sysvals.version))
print('Usage: sudo sleepgraph <options> <commands>')
print('')
print('Description:')
print(' This tool is designed to assist kernel and OS developers in optimizing')
print(' their linux stack\'s suspend/resume time. Using a kernel image built')
print(' with a few extra options enabled, the tool will execute a suspend and')
print(' capture dmesg and ftrace data until resume is complete. This data is')
print(' transformed into a device timeline and an optional callgraph to give')
print(' a detailed view of which devices/subsystems are taking the most')
print(' time in suspend/resume.')
print('')
print(' If no specific command is given, the default behavior is to initiate')
print(' a suspend/resume and capture the dmesg/ftrace output as an html timeline.')
print('')
print(' Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
print(' HTML output: <hostname>_<mode>.html')
print(' raw dmesg output: <hostname>_<mode>_dmesg.txt')
print(' raw ftrace output: <hostname>_<mode>_ftrace.txt')
print('')
print('Options:')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -config fn Pull arguments and config options from file fn')
print(' -verbose Print extra information during execution and analysis')
	print('   -m mode      Mode to initiate for suspend %s (default: %s)' % (modes, sysvals.suspendmode))
print(' -o subdir Override the output subdirectory')
print(' -rtcwake t Wakeup t seconds after suspend, set t to "off" to disable (default: 15)')
print(' -addlogs Add the dmesg and ftrace logs to the html output')
print(' -srgap Add a visible gap in the timeline between sus/res (default: disabled)')
print(' [advanced]')
print(' -cmd {s} Run the timeline over a custom command, e.g. "sync -d"')
print(' -proc Add usermode process info into the timeline (default: disabled)')
print(' -dev Add kernel function calls and threads to the timeline (default: disabled)')
print(' -x2 Run two suspend/resumes back to back (default: disabled)')
print(' -x2delay t Include t ms delay between multiple test runs (default: 0 ms)')
print(' -predelay t Include t ms delay before 1st suspend (default: 0 ms)')
print(' -postdelay t Include t ms delay after last resume (default: 0 ms)')
print(' -mindev ms Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -multi n d Execute <n> consecutive tests at <d> seconds intervals. The outputs will')
print(' be created in a new subdirectory with a summary page.')
print(' [debug]')
print(' -f Use ftrace to create device callgraphs (default: disabled)')
print(' -maxdepth N limit the callgraph data to N call levels (default: 0=all)')
print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
print(' -fadd file Add functions to be graphed in the timeline from a list in a text file')
print(' -filter "d1,d2,..." Filter out all but this comma-delimited list of device names')
print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -cgphase P Only show callgraph data for phase P (e.g. suspend_late)')
print(' -cgtest N Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)')
print(' -timeprec N Number of significant digits in timestamps (0:S, [3:ms], 6:us)')
print(' [commands]')
print(' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)')
print(' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)')
	print('   -summary directory  Create a summary of all tests in this dir')
print(' -modes List available suspend modes')
print(' -status Test to see if the system is enabled to run this tool')
print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table')
print(' -usbtopo Print out the current USB topology with power info')
print(' -usbauto Enable autosuspend for all connected USB devices')
print(' -flist Print the list of functions currently being captured in ftrace')
print(' -flistall Print all functions capable of being captured in ftrace')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
cmd = ''
cmdarg = ''
multitest = {'run': False, 'count': 0, 'delay': 0}
simplecmds = ['-modes', '-fpdt', '-flist', '-flistall', '-usbtopo', '-usbauto', '-status']
# loop through the command line arguments
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-m'):
try:
val = args.next()
except:
doError('No mode supplied', True)
if val == 'command' and not sysvals.testcommand:
doError('No command supplied for mode "command"', True)
sysvals.suspendmode = val
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-h'):
printHelp()
sys.exit()
elif(arg == '-v'):
print("Version %s" % sysvals.version)
sys.exit()
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
elif(arg == '-predelay'):
sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
elif(arg == '-postdelay'):
sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
elif(arg == '-f'):
sysvals.usecallgraph = True
elif(arg == '-addlogs'):
sysvals.addlogs = True
elif(arg == '-verbose'):
sysvals.verbose = True
elif(arg == '-proc'):
sysvals.useprocmon = True
elif(arg == '-dev'):
sysvals.usedevsrc = True
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-rtcwake'):
try:
val = args.next()
except:
doError('No rtcwake time supplied', True)
if val.lower() == 'off':
sysvals.rtcwake = False
else:
sysvals.rtcwake = True
sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
elif(arg == '-timeprec'):
sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
elif(arg == '-mindev'):
sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
elif(arg == '-mincg'):
sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-cgtest'):
sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
elif(arg == '-cgphase'):
try:
val = args.next()
except:
doError('No phase name supplied', True)
d = Data(0)
if val not in d.phases:
				doError('Invalid phase, valid phases are %s' % d.phases, True)
sysvals.cgphase = val
elif(arg == '-callloop-maxgap'):
sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
elif(arg == '-callloop-maxlen'):
sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
elif(arg == '-cmd'):
try:
val = args.next()
except:
doError('No command string supplied', True)
sysvals.testcommand = val
sysvals.suspendmode = 'command'
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-srgap'):
sysvals.srgap = 5
elif(arg == '-multi'):
multitest['run'] = True
multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000)
multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600)
elif(arg == '-o'):
try:
val = args.next()
except:
doError('No subdirectory name supplied', True)
sysvals.setOutputFolder(val)
elif(arg == '-config'):
try:
val = args.next()
except:
doError('No text file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
configFromFile(val)
elif(arg == '-fadd'):
try:
val = args.next()
except:
doError('No text file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
sysvals.addFtraceFilterFunctions(val)
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
sysvals.notestrun = True
sysvals.dmesgfile = val
if(os.path.exists(sysvals.dmesgfile) == False):
doError('%s does not exist' % sysvals.dmesgfile)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
sysvals.notestrun = True
sysvals.ftracefile = val
if(os.path.exists(sysvals.ftracefile) == False):
doError('%s does not exist' % sysvals.ftracefile)
elif(arg == '-summary'):
try:
val = args.next()
except:
doError('No directory supplied', True)
cmd = 'summary'
cmdarg = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
				doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = args.next()
except:
doError('No devnames supplied', True)
sysvals.setDeviceFilter(val)
else:
doError('Invalid argument: '+arg, True)
# compatibility errors
if(sysvals.usecallgraph and sysvals.usedevsrc):
doError('-dev is not compatible with -f')
if(sysvals.usecallgraph and sysvals.useprocmon):
doError('-proc is not compatible with -f')
# callgraph size cannot exceed device size
if sysvals.mincglen < sysvals.mindevlen:
sysvals.mincglen = sysvals.mindevlen
# just run a utility command and exit
if(cmd != ''):
if(cmd == 'status'):
statusCheck(True)
elif(cmd == 'fpdt'):
getFPDT(True)
elif(cmd == 'usbtopo'):
detectUSB()
elif(cmd == 'modes'):
			print(getModes())
elif(cmd == 'flist'):
sysvals.getFtraceFilterFunctions(True)
elif(cmd == 'flistall'):
sysvals.getFtraceFilterFunctions(False)
elif(cmd == 'usbauto'):
setUSBDevicesAuto()
elif(cmd == 'summary'):
runSummary(cmdarg, True)
sys.exit()
# if instructed, re-analyze existing data files
if(sysvals.notestrun):
rerunTest()
sys.exit()
# verify that we can run a test
if(not statusCheck()):
print('Check FAILED, aborting the test run!')
sys.exit()
if multitest['run']:
# run multiple tests in a separate subdirectory
s = 'x%d' % multitest['count']
if not sysvals.outdir:
sysvals.outdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
if not os.path.isdir(sysvals.outdir):
os.mkdir(sysvals.outdir)
for i in range(multitest['count']):
if(i != 0):
print('Waiting %d seconds...' % (multitest['delay']))
time.sleep(multitest['delay'])
print('TEST (%d/%d) START' % (i+1, multitest['count']))
runTest(sysvals.outdir)
print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
runSummary(sysvals.outdir, False)
else:
# run the test in the current directory
runTest('.', sysvals.outdir)
| gpl-2.0 |
herilalaina/scikit-learn | sklearn/metrics/setup.py | 68 | 1061 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.pyx"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
MebiusHKU/flask-web | flask/lib/python2.7/site-packages/sqlalchemy/orm/query.py | 29 | 138170 | # orm/query.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative, InspectionAttr
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_suffixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_orm_only_adapt = True
_orm_only_from_obj_alias = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
if not self._orm_only_adapt:
orm_only = False
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
orm_only if self._orm_only_from_obj_alias else False,
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity \
if self._select_from_entity is not None \
else self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _bind_mapper(self):
ezero = self._mapper_zero()
if ezero is not None:
insp = inspect(ezero)
if not insp.is_clause_element:
return insp.mapper
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
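    # Illustrative usage sketch (hypothetical User/Address mappings, not part
    # of the SQLAlchemy source): a common pattern is to aggregate in a
    # subquery and join the alias back onto the parent entity, e.g.
    #
    #   adr_count = session.query(
    #       Address.user_id,
    #       func.count(Address.id).label('address_count')).\
    #       group_by(Address.user_id).subquery()
    #   session.query(User, adr_count.c.address_count).\
    #       outerjoin(adr_count, User.id == adr_count.c.user_id)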
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
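E.g., an illustrative sketch producing a labeled statement suitable
for embedding as a subquery (assuming mapped ``User`` and ``Address``
classes)::
    stmt = session.query(User, Address).\\
        join(User.addresses).\\
        with_labels().statement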
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
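E.g., an illustrative sketch, assuming joined-inheritance subclasses
``Engineer`` and ``Manager`` of a base ``Employee`` (the attribute
names here are hypothetical)::
    q = session.query(Employee).\\
        with_polymorphic([Engineer, Manager]).\\
        filter(or_(Engineer.engineer_info == 'x',
                   Manager.manager_data == 'y'))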
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is, when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to allocate
very large areas of memory all at once, which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by the
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect,
which will stream results using server side cursors
instead of pre-buffering all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True,
"max_row_buffer": count})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading_relationships`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary_key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
return self._get_impl(ident, loading.load_on_ident)
def _get_impl(self, ident, fallback_fn):
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return fallback_fn(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
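E.g., an illustrative sketch (assuming mapped ``User`` and ``Address``
classes) where the inner query is explicitly correlated to the
enclosing ``User`` query::
    inner = session.query(Address.id).\\
        filter(Address.user_id == User.id).\\
        correlate(User)
    q = session.query(User).filter(inner.exists())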
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
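E.g., an illustrative sketch, assuming a ``User.addresses``
relationship::
    u = session.query(User).get(5)
    addresses = session.query(Address).\\
        with_parent(u, User.addresses).all()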
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
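E.g., an illustrative sketch that limits the rows first and then
filters against the limited set (assuming a mapped ``User`` class)::
    q = session.query(User).\\
        order_by(User.id).limit(10)
    q = q.from_self().\\
        filter(User.name.like('e%'))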
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
q._select_from_entity = self._mapper_zero()
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes', '_suffixes'
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading_relationships` for reference
documentation.
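E.g., an illustrative sketch applying a relationship loader option,
assuming a ``User.addresses`` relationship::
    from sqlalchemy.orm import joinedload
    q = session.query(User).\\
        options(joinedload(User.addresses))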
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class,
etc.
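E.g., an illustrative sketch using a hypothetical MySQL index name::
    q = session.query(User).\\
        with_hint(User, 'USE INDEX (ix_user_name)', 'mysql')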
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
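E.g., an illustrative sketch::
    q = session.query(User).\\
        execution_options(stream_results=True)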
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
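E.g., an illustrative sketch binding a parameter used within a
textual criterion::
    from sqlalchemy import text
    q = session.query(User).\\
        filter(text("id<:value")).\\
        params(value=5)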
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
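E.g., an illustrative sketch (assuming a mapped ``User`` class)::
    q = session.query(User).order_by(User.name, User.id)
    # cancel any ORDER BY, including mapper-configured ordering
    q = q.order_by(None)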
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
functions like COUNT, SUM, AVG, MAX, and MIN, eg.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='ed@foo.com').\\
filter(a_alias.email_address=='ed@bar.com')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
**Advanced Join Targeting and Adaption**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
:func:`~sqlalchemy.orm.relationship`-driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == 'ed@foo.com').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
A special two-argument calling form, ``target, onclause``,
is also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
', '.join(sorted(kwargs)))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
', '.join(sorted(kwargs)))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
keylist = util.to_list(keys)
for idx, arg1 in enumerate(keylist):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
if idx == len(keylist) - 1:
util.warn(
"Pathed join target %s has already "
"been joined to; skipping" % prop)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
# which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
:meth:`.Query.select_entity_from` was added to specify
the specific behavior of entity replacement, however
the :meth:`.Query.select_from` maintains this behavior
as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
:param \*criterion: optional column expressions. When present,
the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
construct.
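E.g., an illustrative ``DISTINCT ON`` sketch for the Postgresql
dialect (assuming a mapped ``User`` class)::
    q = session.query(User).\\
        distinct(User.name).\\
        order_by(User.name, User.id)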
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
:param \*prefixes: optional prefixes, typically strings,
not using any commas. In particular is useful for MySQL keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
.. seealso::
:meth:`.HasPrefixes.prefix_with`
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
@_generative()
def suffix_with(self, *suffixes):
"""Apply the suffix to the query and return the newly resulting
``Query``.
:param \*suffixes: optional suffixes, typically strings,
not using any commas.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.prefix_with`
:meth:`.HasSuffixes.suffix_with`
"""
if self._suffixes:
self._suffixes += suffixes
else:
self._suffixes = suffixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
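E.g., an illustrative sketch using a :func:`~.expression.text`
construct, assuming a ``users`` table mapped by ``User``::
    from sqlalchemy import text
    q = session.query(User).from_statement(
        text("SELECT * FROM users WHERE users.name=:name")).\\
        params(name='ed')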
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._bind_mapper(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(querycontext.query, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
'entity': User
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
'entity': User
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias,
'entity': user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(insp_ent, 'is_aliased_class', False),
'expr': ent.expr,
'entity':
getattr(insp_ent, "entity", None)
if ent.entity_zero is not None
and not insp_ent.is_clause_element
else None
}
for ent, insp_ent in [
(
_ent,
(inspect(_ent.entity_zero)
if _ent.entity_zero is not None else None)
)
for _ent in self._entities
]
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
                print(u)
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'suffixes': self._suffixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
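        # LIMIT/OFFSET/DISTINCT require the base query to be wrapped in a
        # subquery, so that rows contributed by eager JOINs cannot change
        # which primary rows are returned.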
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
delete(synchronize_session=False)
sess.query(User).filter(User.age == 25).\\
delete(synchronize_session='evaluate')
.. warning:: The :meth:`.Query.delete` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query deletes**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON DELETE CASCADE/SET
NULL/etc. is configured for any foreign key references
which require it, otherwise the database may emit an
integrity violation if foreign key references are being
enforced.
After the DELETE, dependent objects in the
:class:`.Session` which were impacted by an ON DELETE
may not contain the current state, or may have been
deleted. This issue is resolved once the
:class:`.Session` is expired, which normally occurs upon
:meth:`.Session.commit` or can be forced by using
:meth:`.Session.expire_all`. Accessing an expired
object whose row has been deleted will invoke a SELECT
to locate the row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
raised.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
              via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events **are not invoked** from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to
act upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate', update_args=None):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\\
update({User.age: User.age - 10}, synchronize_session=False)
sess.query(User).filter(User.age == 25).\\
update({"age": User.age - 10}, synchronize_session='evaluate')
.. warning:: The :meth:`.Query.update` method is a "bulk" operation,
which bypasses ORM unit-of-work automation in favor of greater
performance. **Please read all caveats and warnings below.**
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:param update_args: Optional dictionary, if present will be passed
to the underlying :func:`.update` construct as the ``**kw`` for
the object. May be used to pass dialect-specific arguments such
as ``mysql_limit``.
.. versionadded:: 1.0.0
:return: the count of rows matched as returned by the database's
"row count" feature.
.. warning:: **Additional Caveats for bulk query updates**
* The method does **not** offer in-Python cascading of
relationships - it is assumed that ON UPDATE CASCADE is
configured for any foreign key references which require
it, otherwise the database may emit an integrity
violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the
:class:`.Session` which were impacted by an ON UPDATE
CASCADE may not contain the current state; this issue is
resolved once the :class:`.Session` is expired, which
normally occurs upon :meth:`.Session.commit` or can be
forced by using :meth:`.Session.expire_all`.
* The ``'fetch'`` strategy results in an additional
SELECT statement emitted and will significantly reduce
performance.
* The ``'evaluate'`` strategy performs a scan of
all matching objects within the :class:`.Session`; if the
contents of the :class:`.Session` are expired, such as
              via a preceding :meth:`.Session.commit` call, **this will
result in SELECT queries emitted for every matching object**.
* The method supports multiple table updates, as detailed
in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and
other multiple table mappings. However, the **join
condition of an inheritance mapper is not
automatically rendered**. Care must be taken in any
multiple-table update to explicitly include the joining
condition between those tables, even in mappings where
this is normally automatic. E.g. if a class ``Engineer``
subclasses ``Employee``, an UPDATE of the ``Engineer``
local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events **are not invoked from this method**. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to
act upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_args = update_args or {}
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values, update_args)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
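        # Compilation pipeline: run any before_compile hooks, snapshot the
        # query into a QueryContext, let each entity contribute its columns
        # and FROM objects, apply eager joins and single-table-inheritance
        # criteria, then emit either a plain SELECT or a nested one when
        # eager joins must not disturb LIMIT/OFFSET/DISTINCT.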
if self.dispatch.before_compile:
for fn in self.dispatch.before_compile:
new_query = fn(self)
if new_query is not None:
self = new_query
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
# else "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
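        # Translate the legacy ``with_lockmode()`` string values into the
        # read/nowait flags understood by the Core FOR UPDATE construct.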
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note the newer style of using a free standing with_polymorphic()
construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
only_load_props = query._only_load_props
refresh_state = context.refresh_state
else:
only_load_props = refresh_state = None
_instance = loading._instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=only_load_props,
refresh_state=refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
loading._setup_entity_query(
context, self.mapper, self,
self.path, adapter, context.primary_columns,
with_polymorphic=self._with_polymorphic,
only_load_props=query._only_load_props,
polymorphic_discriminator=self._polymorphic_discriminator)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(InspectionAttr):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
is_clause_element = False
is_mapper = False
is_aliased_class = False
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
search_entities = True
check_column = False
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
check_column = True
_entity = None
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
_entity = getattr(column, '_parententity', None)
if _entity is not None:
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
if hasattr(column, '_select_iterable'):
# break out an object like Table into
# individual columns
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
elif not check_column:
self._label_name = getattr(column, 'key', None)
search_entities = True
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
if not search_entities:
self.entity_zero = _entity
if _entity:
self.entities = [_entity]
else:
self.entities = []
self._from_entities = set(self.entities)
else:
all_elements = [
elem for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
]
self.entities = util.unique_list([
elem._annotations['parententity']
for elem in all_elements
if 'parententity' in elem._annotations
])
self._from_entities = set([
elem._annotations['parententity']
for elem in all_elements
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
])
if self.entities:
self.entity_zero = self.entities[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
if self.actual_froms.intersection(ext_info.selectable._from_objects):
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def row_processor(self, query, context, result):
if ('fetch_column', self) in context.attributes:
column = context.attributes[('fetch_column', self)]
else:
column = query._adapt_clause(self.column, False, True)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = query._adapt_clause(self.column, False, True)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
context.attributes[('fetch_column', self)] = column
def __str__(self):
return str(self.column)
class QueryContext(object):
__slots__ = (
'multi_row_eager_loaders', 'adapter', 'froms', 'for_update',
'query', 'session', 'autoflush', 'populate_existing',
'invoke_all_eagers', 'version_check', 'refresh_state',
'primary_columns', 'secondary_columns', 'eager_order_by',
'eager_joins', 'create_eager_joins', 'propagate_options',
'attributes', 'statement', 'from_clause', 'whereclause',
'order_by', 'labels', '_for_update_arg', 'runid', 'partials'
)
def __init__(self, query):
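        # Snapshot the Query's state (statement, criterion, FROM objects,
        # ordering, loader options) into plain attributes that compilation
        # and row loading are free to mutate.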
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.multi_row_eager_loaders = False
self.adapter = None
self.froms = ()
self.for_update = None
self.query = query
self.session = query.session
self.autoflush = query._autoflush
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| bsd-3-clause |
schets/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 274 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
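# consensus_score compares the biclusters found by the model with the ground
# truth (permuted to match the shuffled data); 1.0 means a perfect match.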
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
megahertz0/tusharedemo | sklearn_stock.py | 1 | 6088 | # -*- coding: utf-8 -*-
"""
Created on Sat May 06 18:35:03 2017
@author: megahertz
"""
import base
import datetime
from math import sqrt
import matplotlib.pyplot as plt
from matplotlib import pyplot
from multiprocessing import Pool
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.externals import joblib
import os
THREAD_POOL_SIZE = 6
OLD_DAYS = 30
FUTURE_DAYS = 10
TEST_DAYS = 10
DATA_Y_LEN = 1
#learn_model = LogisticRegression(C=1000.0, random_state=0)
#learn_model = SVC(kernel='linear', C=1.0, random_state=0)
learn_model = SVC(kernel='rbf', C=1.0, random_state=0, gamma=0.8)
#learn_model = KNeighborsClassifier(n_neighbors=3, p=2, metric='minkowski')
MODEL_NAME = 'svc_rbf_gamma_0.8'
MODELS_DIR = './output/sklearn/models/' + MODEL_NAME + '/'
# convert time series into supervised learning problem
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
def calcOperate(row):
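    # Label a row: 1 when the future high beats today's close by more than
    # 10% (buy), -1 when the future high stays below today's close, else 0.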
if (row.high_future > row.close * 1.1):
return 1
if (row.high_future < row.close):
return -1
return 0
def prepareData(code):
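    # Build the learning frame for one stock: OLD_DAYS shifted copies of each
    # OHLCV column as features, plus an 'operate' label computed from
    # 'high_future', the row-wise max of FUTURE_DAYS shifted 'high' columns.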
df_all = base.getOneStockData(code)
if df_all.shape[0] < 300:
raise Exception('data is short.' + str(df_all.shape[0]))
in_columns = ['open','close','high','low','volume']
indexes = []
for i in range(OLD_DAYS):
for in_column in in_columns:
index = in_column + '__' + str(i)
indexes.append(index)
df_all[index] = df_all[in_column].shift(0 - i)
futureIndexes = []
for i in range(1, FUTURE_DAYS):
index = 'high_' + str(i)
futureIndexes.append(index)
df_all[index] = df_all.high.shift(i)
index = 'high_future'
df_all[index] = df_all[futureIndexes].apply(max, 1)
# indexes.append(index)
index = 'operate'
df_all[index] = df_all.apply(calcOperate, 1)
indexes.append(index)
# print df_all.head(10)
df = df_all[indexes].dropna()
return df
def check_stock(code, name):
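    # Prepare the frame for one stock, split and standardise it, fit the
    # configured classifier, persist the fitted model with joblib, and return
    # the held-out accuracy (0 if data preparation fails).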
try:
df = prepareData(code)
except Exception as e:
        print(code + name)
print(e)
return 0
    print(code + name + ' days=' + str(df.shape))
X = df[df.columns[:0-DATA_Y_LEN]]
y = df[df.columns[0-DATA_Y_LEN:]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
# train = df.head(0-TEST_DAYS)
# # print train.columns
# X_train = train[train.columns[:0-DATA_Y_LEN]]
# y_train = train[train.columns[0-DATA_Y_LEN:]]
# # print trainX
# # print trainY
# test = df.tail(TEST_DAYS)
# X_test = test[test.columns[:0-DATA_Y_LEN]]
# y_test = test[test.columns[0-DATA_Y_LEN:]]
# print y_train.values
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# X_test_std = sc.transform(X)
# y_test = y
learn_model.fit(X_train_std, y_train.operate.values)
y_pred = learn_model.predict(X_test_std)
# print('----------LogisticRegression-------------')
# print('Misclassified samples: %d' % (y_test.operate.values != y_pred).sum())
# print('Accuracy: %.2f' % accuracy_score(y_test.operate.values, y_pred))
joblib.dump(learn_model, MODELS_DIR + code + '.m')
return accuracy_score(y_test.operate.values, y_pred)
#def testAlgro():
# iris = datasets.load_iris()
# X = iris.data[:,[2,3]]
# y = iris.target
# np.unique(y)
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# print '------X_train--------'
# print X_train
# print '------X_test--------'
# print X_test
# # print type(X_train), type(X_train[0]), X_train[0], X_train[0][0]
# # print type(X_test), type(X_test[0][0])
# sc = StandardScaler()
# sc.fit(X_train)
# X_train_std = sc.transform(X_train)
# X_test_std = sc.transform(X_test)
# print '------X_train_std--------'
# print X_train_std
# print '------X_test_std--------'
# print X_test_std
def checkStockInThread(index_row):
    # Pool.map passes each (index, row) pair from DataFrame.iterrows() as a
    # single argument, so unpack it here.
    index, row = index_row
    code = index
    name = row['name']
    code_str = str(code).zfill(6)
    score = check_stock(code_str, name)
    return (index, score)
def checkAll():
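    # Score every listed stock in a process pool, attach the accuracy to the
    # stock table and write the ranking to a dated CSV.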
all_stock = base.getAllStock()
pool = Pool(THREAD_POOL_SIZE)
results = pool.map(checkStockInThread, all_stock.iterrows())
pool.close()
pool.join()
for index,score in results:
all_stock.loc[index, 'score'] = score
    newdata = all_stock.sort_values('score', axis=0, ascending=False)
newdata.to_csv(MODEL_NAME + '_test0.5_' + datetime.date.today().strftime('%Y-%m-%d') + '.csv')
if __name__ == '__main__':
# check_stock('600318', '新力金融')
    os.makedirs(MODELS_DIR, exist_ok=True)  # tolerate an existing output directory on re-runs
start = datetime.datetime.now()
checkAll()
end = datetime.datetime.now()
print(("use time ", end - start))
# testAlgro() | lgpl-3.0 |
rhiever/sklearn-benchmarks | model_code/random_search_preprocessing/MultinomialNB.py | 1 | 2033 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import Binarizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import Normalizer, PolynomialFeatures, RobustScaler, StandardScaler
from sklearn.decomposition import FastICA, PCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectFwe, SelectPercentile, VarianceThreshold
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.naive_bayes import MultinomialNB
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
preprocessor_num = int(sys.argv[4])
np.random.seed(random_seed)
preprocessor_list = [Binarizer, MaxAbsScaler, MinMaxScaler, Normalizer,
PolynomialFeatures, RobustScaler, StandardScaler,
FastICA, PCA, RBFSampler, Nystroem, FeatureAgglomeration,
SelectFwe, SelectPercentile, VarianceThreshold,
SelectFromModel, RFE]
chosen_preprocessor = preprocessor_list[preprocessor_num]
pipeline_components = [chosen_preprocessor, MultinomialNB]
pipeline_parameters = {}
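# Draw the random-search candidates: MultinomialNB's smoothing parameter alpha
# is sampled uniformly from [0, 10) and fit_prior is a random True/False flag
# per candidate.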
alpha_values = np.random.uniform(low=0., high=10., size=num_param_combinations)
fit_prior_values = np.random.choice([True, False], size=num_param_combinations)
all_param_combinations = zip(alpha_values, fit_prior_values)
pipeline_parameters[MultinomialNB] = \
[{'alpha': alpha, 'fit_prior': fit_prior}
for (alpha, fit_prior) in all_param_combinations]
if chosen_preprocessor is SelectFromModel:
pipeline_parameters[SelectFromModel] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
elif chosen_preprocessor is RFE:
pipeline_parameters[RFE] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| mit |
schets/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 57 | 13752 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
keras-team/keras-io | examples/generative/pixelcnn.py | 1 | 6349 | """
Title: PixelCNN
Author: [ADMoreau](https://github.com/ADMoreau)
Date created: 2020/05/17
Last modified: 2020/05/23
Description: PixelCNN implemented in Keras.
"""
"""
## Introduction
PixelCNN is a generative model proposed in 2016 by van den Oord et al.
(reference: [Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)).
It is designed to generate images (or other data types) iteratively
from an input vector where the probability distribution of prior elements dictates the
probability distribution of later elements. In the following example, images are generated
in this fashion, pixel-by-pixel, via a masked convolution kernel that only looks at data
from previously generated pixels (origin at the top left) to generate later pixels.
During inference, the output of the network is used as a probability distribution
from which new pixel values are sampled to generate a new image
(here, with MNIST, the pixel values are either black or white).
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tqdm import tqdm
"""
## Getting the Data
"""
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
n_residual_blocks = 5
# The data, split between train and test sets
(x, _), (y, _) = keras.datasets.mnist.load_data()
# Concatenate all of the images together
data = np.concatenate((x, y), axis=0)
# Round all pixel values less than 33% of the max 256 value to 0
# anything above this value gets rounded up to 1 so that all values are either
# 0 or 1
data = np.where(data < (0.33 * 256), 0, 1)
data = data.astype(np.float32)
"""
## Create two classes for the requisite Layers for the model
"""
# The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
class PixelConvLayer(layers.Layer):
def __init__(self, mask_type, **kwargs):
super(PixelConvLayer, self).__init__()
self.mask_type = mask_type
self.conv = layers.Conv2D(**kwargs)
def build(self, input_shape):
# Build the conv2d layer to initialize kernel variables
self.conv.build(input_shape)
# Use the initialized kernel to create the mask
kernel_shape = self.conv.kernel.get_shape()
self.mask = np.zeros(shape=kernel_shape)
self.mask[: kernel_shape[0] // 2, ...] = 1.0
self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0
if self.mask_type == "B":
self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0
def call(self, inputs):
self.conv.kernel.assign(self.conv.kernel * self.mask)
return self.conv(inputs)
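# Illustrative sketch (not from the original example): for a 3x3 kernel the
# masks built above look like this, where 1 means the weight is kept:
#
#   type "A" (first layer,         type "B" (later layers,
#   centre pixel excluded):        centre pixel included):
#       1 1 1                          1 1 1
#       1 0 0                          1 1 0
#       0 0 0                          0 0 0
#
# Type "A" hides the pixel being predicted from the very first layer; type "B"
# lets deeper layers reuse the features already computed at that position.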
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
def __init__(self, filters, **kwargs):
super(ResidualBlock, self).__init__(**kwargs)
self.conv1 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
self.pixel_conv = PixelConvLayer(
mask_type="B",
filters=filters // 2,
kernel_size=3,
activation="relu",
padding="same",
)
self.conv2 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
def call(self, inputs):
x = self.conv1(inputs)
x = self.pixel_conv(x)
x = self.conv2(x)
return keras.layers.add([inputs, x])
"""
## Build the model based on the original paper
"""
inputs = keras.Input(shape=input_shape)
x = PixelConvLayer(
mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(inputs)
for _ in range(n_residual_blocks):
x = ResidualBlock(filters=128)(x)
for _ in range(2):
x = PixelConvLayer(
mask_type="B",
filters=128,
kernel_size=1,
strides=1,
activation="relu",
padding="valid",
)(x)
out = keras.layers.Conv2D(
filters=1, kernel_size=1, strides=1, activation="sigmoid", padding="valid"
)(x)
pixel_cnn = keras.Model(inputs, out)
adam = keras.optimizers.Adam(learning_rate=0.0005)
pixel_cnn.compile(optimizer=adam, loss="binary_crossentropy")
pixel_cnn.summary()
pixel_cnn.fit(
x=data, y=data, batch_size=128, epochs=50, validation_split=0.1, verbose=2
)
"""
## Demonstration
The PixelCNN cannot generate the full image at once. Instead, it must generate each pixel in
order, append the last generated pixel to the current image, and feed the image back into the
model to repeat the process.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/pixel-cnn-mnist)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/pixelcnn-mnist-image-generation).
"""
from IPython.display import Image, display
# Create an empty array of pixels.
batch = 4
pixels = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols, channels = pixels.shape
# Iterate over the pixels because generation has to be done sequentially pixel by pixel.
for row in tqdm(range(rows)):
for col in range(cols):
for channel in range(channels):
            # Feed the whole array and retrieve the pixel value probabilities for the
            # next pixel.
probs = pixel_cnn.predict(pixels)[:, row, col, channel]
# Use the probabilities to pick pixel values and append the values to the image
# frame.
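            # (ceil(probs - u) with u ~ Uniform[0, 1) equals 1 with probability
            # probs and 0 otherwise, i.e. a Bernoulli sample for each binary pixel.)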
pixels[:, row, col, channel] = tf.math.ceil(
probs - tf.random.uniform(probs.shape)
)
def deprocess_image(x):
    # Stack the single-channel black and white image into RGB values.
x = np.stack((x, x, x), 2)
# Undo preprocessing
x *= 255.0
    # Clip to the valid range [0, 255] and convert to uint8
x = np.clip(x, 0, 255).astype("uint8")
return x
# Iterate over the generated images, save them, and display them.
for i, pic in enumerate(pixels):
keras.preprocessing.image.save_img(
"generated_image_{}.png".format(i), deprocess_image(np.squeeze(pic, -1))
)
display(Image("generated_image_0.png"))
display(Image("generated_image_1.png"))
display(Image("generated_image_2.png"))
display(Image("generated_image_3.png"))
| apache-2.0 |
zmr/namsel | config_manager.py | 1 | 3038 | '''A set of utilities for defining and working with namsel-ocr config files
'''
import json
import codecs
import os
import glob
import numpy as np
from utils import create_unique_id, local_file
from collections import Counter
CONF_DIR = './confs'
if not os.path.exists(CONF_DIR):
os.mkdir(CONF_DIR)
def _open(fl, mode='r'):
return codecs.open(fl, mode, encoding='utf-8')
default_config = {
'page_type': 'book',
'line_break_method': 'line_cut',
'recognizer': 'hmm', # or probout
'break_width': 2.0,
'segmenter': 'stochastic', # or experimental
'combine_hangoff': .6,
'low_ink': False,
'line_cluster_pos': 'top', # or center
'viterbi_postprocessing': False, # determine if main is running using viterbi post processing
'postprocess': False, # Run viterbi (or possibly some other) post processing
'stop_line_cut': False,
'detect_o': False,
'clear_hr': False,
    'line_cut_inflation': 4, # The number of iterations when dilating text in line cut. Increase this value when you need to blob things together
}
def update_default():
json.dump(default_config, _open(os.path.join(CONF_DIR, 'default.conf'), 'w'), indent=1)
def create_misc_confs():
from sklearn.grid_search import ParameterGrid
params = {'break_width': [1.5, 2.0, 3.6, 5.0],
'recognizer': ['probout', 'hmm'], 'combine_hangoff': [.4, .6, .8],
'postprocess': [True, False], 'segmenter': ['experimental', 'stochastic'],
'line_cluster_pos': ['top', 'center'],
}
grid = ParameterGrid(params)
for pr in grid:
Config(save_conf=True, **pr)
class Config(object):
def __init__(self, path=None, save_conf=False, **kwargs):
self.conf = default_config
self.path = path
if path:
# Over-write defaults
self._load_json_set_conf(path)
# Set any manually specified config settings
for k in kwargs:
self.conf[k] = kwargs[k]
if kwargs and save_conf:
self._save_conf()
# Set conf params as attributes to conf obj
for k in self.conf:
if k not in self.__dict__:
setattr(self, k, self.conf[k])
def _load_json_set_conf(self, path):
try:
conf = json.load(_open(path))
for k in conf:
self.conf[k] = conf[k]
except IOError:
            print('Error in loading json file at %s. Using default config' % path)
self.conf = default_config
def _save_conf(self):
'''Save a conf if it doesn't already exist'''
confs = glob.glob(os.path.join(CONF_DIR, '*.conf'))
for conf in confs:
conf = json.load(_open(conf))
if conf == self.conf:
return
else:
json.dump(self.conf, _open(os.path.join(CONF_DIR, create_unique_id()+'.conf'), 'w'), indent=1)
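# Example usage (an illustrative sketch; the keyword values below are arbitrary):
#   conf = Config(path=os.path.join(CONF_DIR, 'default.conf'), recognizer='probout')
#   print(conf.recognizer)  # settings are also exposed as attributes
#   Config(save_conf=True, break_width=3.6)  # saved under ./confs with a unique id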
| mit |
schets/scikit-learn | sklearn/feature_selection/rfe.py | 8 | 16893 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
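    For example, with 10 features and ``step=1`` this gives
    ceil((10 - 1) / 1) + 1 = 10 entries in ``grid_scores_``.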
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # Fixing a normalization error: n is equal to len(cv) - 1 here,
        # so the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
schets/scikit-learn | examples/linear_model/plot_omp.py | 379 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
schets/scikit-learn | sklearn/setup.py | 224 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
    # after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
linsalrob/EdwardsLab | sra/study_types.py | 1 | 3096 | import sys
__author__ = 'Rob Edwards'
import sqlite3
import argparse
def get_cursor(database_file):
"""
Open the database and get the cursor
    :param database_file: The SQLite database
    :type database_file: str
:return:
:rtype:
"""
sql = sqlite3.connect(database_file)
return sql.cursor()
def count_study_types(database_file):
"""
Count the study types in the database
    :param database_file: The SQLite database
    :type database_file: str
:return:
:rtype:
"""
sql = get_cursor(database_file)
count = {}
for row in sql.execute('select study_type from study'):
count[row[0]] = count.get(row[0], 0)+1
print("Study type\tInstances")
for t in count:
print("{}\t{}".format(t, count[t]))
def count_metagenome_types(database_file):
"""
    Count the library strategies for the different metagenome datasets
    :param database_file: The SQLite database
    :type database_file: str
:return:
:rtype:
"""
sql = get_cursor(database_file)
count = {}
print("Amplicon Studies")
for row in sql.execute(
'select study_type, count(1) from study where study_accession in (select study_accession from experiment where library_strategy = "AMPLICON") group by study_type;'):
print("\t".join(map(str, row)))
def get_metagenome_run_ids(database_file, amplicons=False):
"""
    Get all the run ids for the metagenome sequences. If amplicons is False we get the non-amplicon sequences. If
    amplicons is True we only get the amplicon sequences.
    :param database_file: The SQLite file
:type database_file: str
:param amplicons: Whether to get amplicon sequences or not
:type amplicons: bool
:return:
:rtype:
"""
sql = get_cursor(database_file)
# select experiment_accession from study left join experiment on
# study.study_accession = experiment.study_accession where experiment.library_strategy = 'FL-cDNA' and study_type NOT LIKE '%their%';
# first build the query to get the experiment accession ids
query = 'select experiment_accession from study left join experiment on study.study_accession = '
query += 'experiment.study_accession where experiment.library_strategy'
if amplicons:
query += ' = "AMPLICON"'
else:
query += ' != "AMPLICON"'
query += ' and study_type = "Metagenomics"'
# now wrap that to get the accession ids using a subselect
query = 'select run_accession from run where experiment_accession in (' + query + ');'
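    # For the non-amplicon case, the assembled query therefore looks roughly like:
    #   select run_accession from run where experiment_accession in (
    #       select experiment_accession from study left join experiment
    #       on study.study_accession = experiment.study_accession
    #       where experiment.library_strategy != "AMPLICON"
    #       and study_type = "Metagenomics");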
sys.stderr.write("Executing query\n" + query + "\n")
for row in sql.execute(query):
print(str(row[0]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='get the study types')
    parser.add_argument('-d', help='SQLite database')
args = parser.parse_args()
# count_study_types(args.d)
# count_metagenome_types(args.d)
print("NON AMPLICON RUNS")
get_metagenome_run_ids(args.d, False)
print("\nAMPLICON RUNS")
get_metagenome_run_ids(args.d, True)
| mit |
pravsripad/mne-python | mne/tests/test_cov.py | 5 | 35977 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import itertools as itt
import sys
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info, compute_rank)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info
from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import (requires_sklearn, catch_logging, assert_snr,
_record_warnings)
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
"""Test properties of compute_whitener."""
raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
raw.pick_types(meg=True, eeg=True, exclude=())
if proj:
raw.apply_proj()
else:
raw.del_proj()
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw)
assert cov['names'] == raw.ch_names
W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
verbose='error')
n_channels = len(raw.ch_names)
n_reduced = len(raw.ch_names)
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
# round-trip mults
round_trip = np.dot(W, C)
if pca is True:
assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
elif pca == 'white':
# Our first few rows/cols are zeroed out in the white space
assert_allclose(round_trip[-rank:, -rank:],
np.eye(rank), atol=1e-7)
else:
assert pca is False
assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
raw.info['bads'] = [raw.ch_names[0]]
picks = pick_types(raw.info, meg=True, eeg=True, exclude=[])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov2 = compute_raw_covariance(raw, picks=picks)
cov3 = compute_raw_covariance(raw, picks=None)
assert_allclose(cov2['data'][1:, 1:], cov3['data'])
W2, _, C2 = compute_whitener(cov2, raw.info, pca=pca, return_colorer=True,
picks=picks, verbose='error')
W3, _, C3 = compute_whitener(cov3, raw.info, pca=pca, return_colorer=True,
picks=None, verbose='error')
# this tol is not great, but Windows needs it
rtol = 1e-3 if sys.platform.startswith('win') else 1e-11
assert_allclose(W, W2, rtol=rtol)
assert_allclose(C, C2, rtol=rtol)
n_channels = len(raw.ch_names) - len(raw.info['bads'])
n_reduced = len(raw.ch_names) - len(raw.info['bads'])
rank = n_channels - len(raw.info['projs'])
n_reduced = rank if pca is True else n_channels
assert W3.shape == C3.shape[::-1] == (n_reduced, n_channels)
def test_cov_mismatch():
"""Test estimation with MEG<->Head mismatch."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = find_events(raw, stim_channel='STI 014')
raw.pick_channels(raw.ch_names[:5])
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
for kind in ('shift', 'None'):
epochs_2 = epochs.copy()
# This should be fine
compute_covariance([epochs, epochs_2])
if kind == 'shift':
epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
else: # None
epochs_2.info['dev_head_t'] = None
pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
compute_covariance([epochs, epochs_2], on_mismatch='ignore')
with pytest.warns(RuntimeWarning, match='transform mismatch'):
compute_covariance([epochs, epochs_2], on_mismatch='warn')
with pytest.raises(ValueError, match='Invalid value'):
compute_covariance(epochs, on_mismatch='x')
# This should work
epochs.info['dev_head_t'] = None
epochs_2.info['dev_head_t'] = None
compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
"""Test covariance ordering."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
info = raw.info
# add MEG channel with low enough index number to affect EEG if
# order is incorrect
info['bads'] += ['MEG 0113']
ch_names = [info['ch_names'][pick]
for pick in pick_types(info, meg=False, eeg=True)]
cov = read_cov(cov_fname)
# no avg ref present warning
prepare_noise_cov(cov, info, ch_names, verbose='error')
# big reordering
cov_reorder = cov.copy()
order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
cov_reorder['names'] = [cov['names'][ii] for ii in order]
cov_reorder['data'] = cov['data'][order][:, order]
# Make sure we did this properly
_assert_reorder(cov_reorder, cov, order)
# Now check some functions that should get the same result for both
# regularize
with pytest.raises(ValueError, match='rank, if str'):
regularize(cov, info, rank='foo')
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=False)
with pytest.raises(TypeError, match='rank must be'):
regularize(cov, info, rank=1.)
cov_reg = regularize(cov, info, rank='full')
cov_reg_reorder = regularize(cov_reorder, info, rank='full')
_assert_reorder(cov_reg_reorder, cov_reg, order)
# prepare_noise_cov
cov_prep = prepare_noise_cov(cov, info, ch_names)
cov_prep_reorder = prepare_noise_cov(cov, info, ch_names)
_assert_reorder(cov_prep, cov_prep_reorder,
order=np.arange(len(cov_prep['names'])))
# compute_whitener
whitener, w_ch_names, n_nzero = compute_whitener(
cov, info, return_rank=True)
assert whitener.shape[0] == whitener.shape[1]
whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
cov_reorder, info, return_rank=True)
assert_array_equal(w_ch_names_2, w_ch_names)
assert_allclose(whitener_2, whitener, rtol=1e-6)
assert n_nzero == n_nzero_2
# with pca
assert n_nzero < whitener.shape[0]
whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
cov, info, pca=True, return_rank=True)
assert_array_equal(w_ch_names_pca, w_ch_names)
assert n_nzero_pca == n_nzero
assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
# whiten_evoked
evoked = read_evokeds(ave_fname)[0]
evoked_white = whiten_evoked(evoked, cov)
evoked_white_2 = whiten_evoked(evoked, cov_reorder)
assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmp_path):
"""Test ad hoc cov creation and I/O."""
out_fname = tmp_path / 'test-cov.fif'
evoked = read_evokeds(ave_fname)[0]
cov = make_ad_hoc_cov(evoked.info)
cov.save(out_fname)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
cov = make_ad_hoc_cov(evoked.info, std)
cov.save(out_fname, overwrite=True)
assert 'Covariance' in repr(cov)
cov2 = read_cov(out_fname)
assert_array_almost_equal(cov['data'], cov2['data'])
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
cov['diag'] = False
cov._get_square()
cov['data'] = np.diag(cov['data'])
with pytest.raises(RuntimeError, match='attributes inconsistent'):
cov._get_square()
def test_io_cov(tmp_path):
"""Test IO for noise covariance matrices."""
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
cov['loglik'] = -np.inf
cov.save(tmp_path / 'test-cov.fif')
cov2 = read_cov(tmp_path / 'test-cov.fif')
assert_array_almost_equal(cov.data, cov2.data)
assert_equal(cov['method'], cov2['method'])
assert_equal(cov['loglik'], cov2['loglik'])
assert 'Covariance' in repr(cov)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmp_path / 'test-cov.fif.gz')
cov2 = read_cov(tmp_path / 'test-cov.fif.gz')
assert_array_almost_equal(cov.data, cov2.data)
cov['bads'] = ['EEG 039']
cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
cov_sel.save(tmp_path / 'test-cov.fif', overwrite=True)
cov2 = read_cov(cov_gz_fname)
assert_array_almost_equal(cov.data, cov2.data)
cov2.save(tmp_path / 'test-cov.fif.gz', overwrite=True)
cov2 = read_cov(tmp_path / 'test-cov.fif.gz')
assert_array_almost_equal(cov.data, cov2.data)
# test warnings on bad filenames
cov_badname = tmp_path / 'test-bad-name.fif.gz'
with pytest.warns(RuntimeWarning, match='-cov.fif'):
write_cov(cov_badname, cov)
with pytest.warns(RuntimeWarning, match='-cov.fif'):
read_cov(cov_badname)
@pytest.mark.parametrize('method', [
None,
'empirical',
pytest.param('shrunk', marks=pytest.mark.slowtest),
])
def test_cov_estimation_on_raw(method, tmp_path):
"""Test estimation from raw (typically empty room)."""
if method == 'shrunk':
try:
import sklearn # noqa: F401
except Exception as exp:
pytest.skip('sklearn is required, got %s' % (exp,))
raw = read_raw_fif(raw_fname, preload=True)
cov_mne = read_cov(erm_cov_fname)
method_params = dict(shrunk=dict(shrinkage=[0]))
    # The pure-string form uses the more efficient numpy-based method, while
    # the list gets triaged to compute_covariance (should be equivalent
    # but use more memory)
with _record_warnings(): # can warn about EEG ref
cov = compute_raw_covariance(
raw, tstep=None, method=method, rank='full',
method_params=method_params)
assert_equal(cov.ch_names, cov_mne.ch_names)
assert_equal(cov.nfree, cov_mne.nfree)
assert_snr(cov.data, cov_mne.data, 1e6)
# test equivalence with np.cov
cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
if method != 'shrunk': # can check all
off_diag = np.triu_indices(cov_np.shape[0])
else:
# We explicitly zero out off-diag entries between channel types,
# so let's just check MEG off-diag entries
off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
exclude=())))
for other in (cov_mne, cov):
assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
assert_snr(cov.data, other.data, 1e6)
# tstep=0.2 (default)
with _record_warnings(): # can warn about EEG ref
cov = compute_raw_covariance(raw, method=method, rank='full',
method_params=method_params)
assert_equal(cov.nfree, cov_mne.nfree - 120) # cutoff some samples
assert_snr(cov.data, cov_mne.data, 170)
# test IO when computation done in Python
cov.save(tmp_path / 'test-cov.fif') # test saving
cov_read = read_cov(tmp_path / 'test-cov.fif')
assert cov_read.ch_names == cov.ch_names
assert cov_read.nfree == cov.nfree
assert_array_almost_equal(cov.data, cov_read.data)
# test with a subset of channels
raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
raw_pick.info.normalize_proj()
cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
rank='full', method_params=method_params)
assert cov_mne.ch_names[:5] == cov.ch_names
assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
cov = compute_raw_covariance(raw_pick, method=method, rank='full',
method_params=method_params)
assert_snr(cov.data, cov_mne.data[:5, :5], 90) # cutoff samps
# make sure we get a warning with too short a segment
raw_2 = read_raw_fif(raw_fname).crop(0, 1)
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_raw_covariance(raw_2, method=method,
method_params=method_params)
# no epochs found due to rejection
pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
method='empirical', reject=dict(eog=200e-6))
# but this should work
with _record_warnings(): # sklearn
cov = compute_raw_covariance(
raw.copy().crop(0, 10.), tstep=None, method=method,
reject=dict(eog=1000e-6), method_params=method_params,
verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
"""Test estimation from raw with regularization."""
raw = read_raw_fif(raw_fname, preload=True)
with raw.info._unlock():
raw.info['sfreq'] /= 10.
raw = RawArray(raw._data[:, ::10].copy(), raw.info) # decimate for speed
cov_mne = read_cov(erm_cov_fname)
with pytest.warns(RuntimeWarning, match='Too few samples'):
# "diagonal_fixed" is much faster. Use long epochs for speed.
cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank, tmp_path):
"""Test estimation from raw with triggers."""
raw = read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True).load_data()
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True,
reject=reject, preload=True)
cov = compute_covariance(epochs, keep_sample_mean=True)
cov_km = read_cov(cov_km_fname)
# adjust for nfree bug
cov_km['nfree'] -= 1
_assert_cov(cov, cov_km)
# Test with tmin and tmax (different but not too much)
cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
assert np.all(cov.data != cov_tmin_tmax.data)
err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
linalg.norm(cov_tmin_tmax.data, ord='fro'))
assert err < 0.05
# cov using a list of epochs and keep_sample_mean=True
epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject)
for ev_id in event_ids]
cov2 = compute_covariance(epochs, keep_sample_mean=True)
assert_array_almost_equal(cov.data, cov2.data)
assert cov.ch_names == cov2.ch_names
# cov with keep_sample_mean=False using a list of epochs
cov = compute_covariance(epochs, keep_sample_mean=False)
assert cov_km.nfree == cov.nfree
_assert_cov(cov, read_cov(cov_fname), nfree=False)
method_params = {'empirical': {'assume_centered': False}}
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method_params=method_params)
pytest.raises(ValueError, compute_covariance, epochs,
keep_sample_mean=False, method='shrunk', rank=rank)
# test IO when computation done in Python
cov.save(tmp_path / 'test-cov.fif') # test saving
cov_read = read_cov(tmp_path / 'test-cov.fif')
_assert_cov(cov, cov_read, 1e-5)
# cov with list of epochs with different projectors
epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True),
Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=False)]
# these should fail
pytest.raises(ValueError, compute_covariance, epochs)
pytest.raises(ValueError, compute_covariance, epochs, projs=None)
# these should work, but won't be equal to above
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
with pytest.warns(RuntimeWarning, match='Too few samples'):
cov = compute_covariance(epochs, projs=[])
# test new dict support
epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
proj=True, reject=reject, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs)
with pytest.warns(RuntimeWarning, match='Too few samples'):
compute_covariance(epochs, projs=[])
pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
"""Test arithmetic with noise covariance matrices."""
cov = read_cov(cov_fname)
cov_sum = cov + cov
assert_array_almost_equal(2 * cov.nfree, cov_sum.nfree)
assert_array_almost_equal(2 * cov.data, cov_sum.data)
assert cov.ch_names == cov_sum.ch_names
cov += cov
assert_array_almost_equal(cov_sum.nfree, cov.nfree)
assert_array_almost_equal(cov_sum.data, cov.data)
assert cov_sum.ch_names == cov.ch_names
def test_regularize_cov():
"""Test cov regularization."""
raw = read_raw_fif(raw_fname)
raw.info['bads'].append(raw.ch_names[0]) # test with bad channels
noise_cov = read_cov(cov_fname)
# Regularize noise cov
reg_noise_cov = regularize(noise_cov, raw.info,
mag=0.1, grad=0.1, eeg=0.1, proj=True,
exclude='bads', rank='full')
assert noise_cov['dim'] == reg_noise_cov['dim']
assert noise_cov['data'].shape == reg_noise_cov['data'].shape
assert np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08
# make sure all args are represented
assert set(_DATA_CH_TYPES_SPLIT) - set(_get_args(regularize)) == set()
def test_whiten_evoked():
"""Test whitening of evoked data."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
cov = read_cov(cov_fname)
###########################################################################
# Show result
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
exclude='bads', rank='full')
evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
whiten_baseline_data = evoked_white.data[picks][:, evoked.times < 0]
mean_baseline = np.mean(np.abs(whiten_baseline_data), axis=1)
assert np.all(mean_baseline < 1.)
assert np.all(mean_baseline > 0.2)
# degenerate
cov_bad = pick_channels_cov(cov, include=evoked.ch_names[:10])
pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, picks)
def test_regularized_covariance():
"""Test unchanged data with regularized_covariance."""
evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
proj=True)
data = evoked.data.copy()
# check that input data remain unchanged. gh-5698
_regularized_covariance(data)
assert_allclose(data, evoked.data, atol=1e-20)
@requires_sklearn
def test_auto_low_rank():
"""Test probabilistic low rank estimators."""
n_samples, n_features, rank = 400, 10, 5
sigma = 0.1
def get_data(n_samples, n_features, rank, sigma):
rng = np.random.RandomState(42)
W = rng.randn(n_features, n_features)
X = rng.randn(n_samples, rank)
U, _, _ = linalg.svd(W.copy())
X = np.dot(X, U[:, :rank].T)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X += rng.randn(n_samples, n_features) * sigmas
return X
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [4, 5, 6]}
cv = 3
n_jobs = 1
mode = 'factor_analysis'
rescale = 1e8
X *= rescale
est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params,
cv=cv)
assert_equal(info['best'], rank)
X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
sigma=sigma)
method_params = {'iter_n_components': [n_features + 5]}
msg = ('You are trying to estimate %i components on matrix '
'with %i features.') % (n_features + 5, n_features)
with pytest.warns(RuntimeWarning, match=msg):
_auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_sklearn
def test_compute_covariance_auto_reg(rank):
"""Test automated regularization."""
raw = read_raw_fif(raw_fname, preload=True)
raw.resample(100, npad='auto') # much faster estimation
events = find_events(raw, stim_channel='STI 014')
event_ids = [1, 2, 3, 4]
reject = dict(mag=4e-12)
# cov with merged events and keep_sample_mean=True
events_merged = merge_events(events, event_ids, 1234)
# we need a few channels for numerical reasons in PCA/FA
picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
epochs = Epochs(
raw, events_merged, 1234, tmin=-0.2, tmax=0,
baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
epochs = epochs.crop(None, 0)[:5]
method_params = dict(factor_analysis=dict(iter_n_components=[3]),
pca=dict(iter_n_components=[3]))
covs = compute_covariance(epochs, method='auto',
method_params=method_params,
return_estimators=True, rank=rank)
# make sure regularization produces structured differences
diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
off_diag_mask = np.invert(diag_mask)
for cov_a, cov_b in itt.combinations(covs, 2):
if (cov_a['method'] == 'diagonal_fixed' and
# here we have diagonal or no regularization.
cov_b['method'] == 'empirical' and rank == 'full'):
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
# but the rest is the same
assert_allclose(cov_a['data'][off_diag_mask],
cov_b['data'][off_diag_mask], rtol=1e-12)
else:
# and here we have shrinkage everywhere.
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
assert not np.any(cov_a['data'][diag_mask] ==
cov_b['data'][diag_mask])
logliks = [c['loglik'] for c in covs]
assert np.diff(logliks).max() <= 0 # descending order
methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
if rank == 'full':
methods.extend(['factor_analysis', 'pca'])
with catch_logging() as log:
cov3 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=True, rank=rank,
verbose=True)
log = log.getvalue().split('\n')
if rank is None:
assert ' Setting small MAG eigenvalues to zero (without PCA)' in log
assert 'Reducing data rank from 10 -> 7' in log
else:
assert 'Reducing' not in log
method_names = [cov['method'] for cov in cov3]
best_bounds = [-45, -35]
bounds = [-55, -45] if rank == 'full' else best_bounds
for method in set(methods) - {'empirical', 'shrunk'}:
this_lik = cov3[method_names.index(method)]['loglik']
assert bounds[0] < this_lik < bounds[1]
this_lik = cov3[method_names.index('shrunk')]['loglik']
assert best_bounds[0] < this_lik < best_bounds[1]
this_lik = cov3[method_names.index('empirical')]['loglik']
bounds = [-110, -100] if rank == 'full' else best_bounds
assert bounds[0] < this_lik < bounds[1]
assert_equal({c['method'] for c in cov3}, set(methods))
cov4 = compute_covariance(epochs, method=methods,
method_params=method_params, projs=None,
return_estimators=False, rank=rank)
assert cov3[0]['method'] == cov4['method'] # ordering
# invalid prespecified method
pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
# invalid scalings
pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
# ignore warnings about rank mismatches: sometimes we will intentionally
# violate the computed/info assumption, such as when using SSS with
# `rank='full'`
with _record_warnings():
return _compute_rank_int(cov, info=info, proj=proj)
@pytest.fixture(scope='module')
def raw_epochs_events():
"""Create raw, epochs, and events for tests."""
raw = read_raw_fif(raw_fname).set_eeg_reference(projection=True).crop(0, 3)
raw = maxwell_filter(raw, regularize=None) # heavily reduce the rank
assert raw.info['bads'] == [] # no bads
events = make_fixed_length_events(raw)
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
return (raw, epochs, events)
@requires_sklearn
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
"""Test low-rank covariance matrix estimation."""
epochs = raw_epochs_events[1]
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
methods = ('empirical', 'diagonal_fixed', 'oas')
bounds = {
'None': dict(empirical=(-15000, -5000),
diagonal_fixed=(-1500, -500),
oas=(-700, -600)),
'full': dict(empirical=(-18000, -8000),
diagonal_fixed=(-2000, -1600),
oas=(-1600, -1000)),
'info': dict(empirical=(-15000, -5000),
diagonal_fixed=(-700, -600),
oas=(-700, -600)),
}
with pytest.warns(RuntimeWarning, match='Too few samples'):
covs = compute_covariance(
epochs, method=methods, return_estimators=True, rank=rank,
verbose=True)
for cov in covs:
method = cov['method']
these_bounds = bounds[str(rank)][method]
this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
if rank == 'full' and method != 'empirical':
assert this_rank == n_ch
else:
assert this_rank == sss_proj_rank
assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
(rank, method)
@requires_sklearn
def test_low_rank_cov(raw_epochs_events):
"""Test additional properties of low rank computations."""
raw, epochs, events = raw_epochs_events
sss_proj_rank = 139 # 80 MEG + 60 EEG - 1 proj
n_ch = 366
proj_rank = 365 # one EEG proj
with pytest.warns(RuntimeWarning, match='Too few samples'):
emp_cov = compute_covariance(epochs)
# Test equivalence with mne.cov.regularize subspace
with pytest.raises(ValueError, match='are dependent.*must equal'):
regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == proj_rank
with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
_compute_rank_int(reg_cov, info=epochs.info)
del reg_cov
with catch_logging() as log:
reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
verbose=True)
log = log.getvalue()
assert 'jointly' in log
assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
del reg_r_only_cov, reg_r_cov
# test that rank=306 is same as rank='full'
epochs_meg = epochs.copy().pick_types(meg=True)
assert len(epochs_meg.ch_names) == 306
with epochs_meg.info._unlock():
epochs_meg.info.update(bads=[], projs=[])
cov_full = compute_covariance(epochs_meg, method='oas',
rank='full', verbose='error')
assert _cov_rank(cov_full, epochs_meg.info) == 306
with pytest.warns(RuntimeWarning, match='few samples'):
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306))
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
cov_dict = compute_covariance(epochs_meg, method='oas',
rank=dict(meg=306), verbose='error')
assert _cov_rank(cov_dict, epochs_meg.info) == 306
assert_allclose(cov_full['data'], cov_dict['data'])
# Work with just EEG data to simplify projection / rank reduction
raw = raw.copy().pick_types(meg=False, eeg=True)
n_proj = 2
raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
n_ch = len(raw.ch_names)
rank = n_ch - n_proj - 1 # plus avg proj
assert len(raw.info['projs']) == 3
epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
assert len(raw.ch_names) == n_ch
emp_cov = compute_covariance(epochs, rank='full', verbose='error')
assert _cov_rank(emp_cov, epochs.info) == rank
reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
assert _cov_rank(reg_cov, epochs.info) == rank
reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
assert _cov_rank(reg_r_cov, epochs.info) == rank
dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
verbose='error')
assert _cov_rank(dia_cov, epochs.info) == rank
assert_allclose(dia_cov['data'], reg_cov['data'])
epochs.pick_channels(epochs.ch_names[:103])
# degenerate
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='pca')
with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_sklearn
def test_cov_ctf():
"""Test basic cov computation on ctf data with/without compensation."""
raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
events = make_fixed_length_events(raw, 99999)
assert len(events) == 2
ch_names = [raw.info['ch_names'][pick]
for pick in pick_types(raw.info, meg=True, eeg=False,
ref_meg=False)]
for comp in [0, 1]:
raw.apply_gradient_compensation(comp)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0.,
method=['empirical'])
prepare_noise_cov(noise_cov, raw.info, ch_names)
raw.apply_gradient_compensation(0)
epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
raw.apply_gradient_compensation(1)
# TODO This next call in principle should fail.
prepare_noise_cov(noise_cov, raw.info, ch_names)
# make sure comps matrices was not removed from raw
assert raw.info['comps'], 'Comps matrices removed'
def test_equalize_channels():
"""Test equalization of channels for instances of Covariance."""
cov1 = make_ad_hoc_cov(create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.0,
ch_types='eeg'))
cov2 = make_ad_hoc_cov(create_info(['CH5', 'CH1', 'CH2'], sfreq=1.0,
ch_types='eeg'))
cov1, cov2 = equalize_channels([cov1, cov2])
assert cov1.ch_names == ['CH1', 'CH2']
assert cov2.ch_names == ['CH1', 'CH2']
def test_compute_whitener_rank():
"""Test risky rank options."""
info = read_info(ave_fname)
info = pick_info(info, pick_types(info, meg=True))
with info._unlock():
info['projs'] = []
# need a square version because the diag one takes shortcuts in
# compute_whitener (users shouldn't even need this function so it's
# private)
cov = make_ad_hoc_cov(info)._as_square()
assert len(cov['names']) == 306
_, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
assert rank == 306
assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
cov['data'][-1] *= 1e-14 # trivially rank-deficient
_, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
assert rank == 305
assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
# this should emit a warning
with pytest.warns(RuntimeWarning, match='exceeds the estimated'):
_, _, rank = compute_whitener(cov, info, rank=dict(meg=306),
return_rank=True)
assert rank == 306
| bsd-3-clause |
pravsripad/mne-python | mne/decoding/tests/test_csp.py | 11 | 13481 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Romain Trachel <trachelr@gmail.com>
# Alexandre Barachant <alexandre.barachant@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import pytest
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP, _ajd_pham, SPoC
from mne.utils import requires_sklearn
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
def simulate_data(target, n_trials=100, n_channels=10, random_state=42):
"""Simulate data according to an instantaneous mixin model.
Data are simulated in the statistical source space, where one source is
modulated according to a target variable, before being mixed with a
random mixing matrix.
"""
rs = np.random.RandomState(random_state)
    # generate an orthogonal mixing matrix
mixing_mat = np.linalg.svd(rs.randn(n_channels, n_channels))[0]
S = rs.randn(n_trials, n_channels, 50)
S[:, 0] *= np.atleast_2d(np.sqrt(target)).T
S[:, 1:] *= 0.01 # less noise
X = np.dot(mixing_mat, S).transpose((1, 0, 2))
return X, mixing_mat
def deterministic_toy_data(classes=('class_a', 'class_b')):
"""Generate a small deterministic toy data set.
Four independent sources are modulated by the target class and mixed
into signal space.
"""
sources_a = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]],
dtype=float) * 2 - 1
sources_b = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]],
dtype=float) * 2 - 1
sources_a[0, :] *= 1
sources_a[1, :] *= 2
sources_b[2, :] *= 3
sources_b[3, :] *= 4
mixing = np.array([[1.0, 0.8, 0.6, 0.4],
[0.8, 1.0, 0.8, 0.6],
[0.6, 0.8, 1.0, 0.8],
[0.4, 0.6, 0.8, 1.0]])
x_class_a = mixing @ sources_a
x_class_b = mixing @ sources_b
x = np.stack([x_class_a, x_class_b])
y = np.array(classes)
return x, y
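# A minimal usage sketch of CSP on the deterministic toy data above (a sketch,
# not collected by pytest; it mirrors the calls exercised in the tests below).
def _demo_csp_toy():
    x, y = deterministic_toy_data(['class_a', 'class_b'])
    csp = CSP(norm_trace=False, transform_into='average_power', log=True)
    log_power = csp.fit_transform(x, y)  # shape: (n_epochs, n_components)
    return log_power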
@pytest.mark.slowtest
def test_csp():
"""Test Common Spatial Patterns algorithm on epochs."""
raw = io.read_raw_fif(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[2:12:3] # subselect channels -> disable proj!
raw.add_proj([], remove_existing=True)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=False)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
y = epochs.events[:, -1]
# Init
pytest.raises(ValueError, CSP, n_components='foo', norm_trace=False)
for reg in ['foo', -0.1, 1.1]:
csp = CSP(reg=reg, norm_trace=False)
pytest.raises(ValueError, csp.fit, epochs_data, epochs.events[:, -1])
for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
CSP(reg=reg, norm_trace=False)
for cov_est in ['foo', None]:
pytest.raises(ValueError, CSP, cov_est=cov_est, norm_trace=False)
with pytest.raises(TypeError, match='instance of bool'):
CSP(norm_trace='foo')
for cov_est in ['concat', 'epoch']:
CSP(cov_est=cov_est, norm_trace=False)
n_components = 3
# Fit
for norm_trace in [True, False]:
csp = CSP(n_components=n_components, norm_trace=norm_trace)
csp.fit(epochs_data, epochs.events[:, -1])
assert_equal(len(csp.mean_), n_components)
assert_equal(len(csp.std_), n_components)
# Transform
X = csp.fit_transform(epochs_data, y)
sources = csp.transform(epochs_data)
assert (sources.shape[1] == n_components)
assert (csp.filters_.shape == (n_channels, n_channels))
assert (csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(sources, X)
# Test data exception
pytest.raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
pytest.raises(ValueError, csp.fit, epochs, y)
pytest.raises(ValueError, csp.transform, epochs)
# Test plots
epochs.pick_types(meg='mag')
cmap = ('RdBu', True)
components = np.arange(n_components)
for plot in (csp.plot_patterns, csp.plot_filters):
plot(epochs.info, components=components, res=12, show=False, cmap=cmap)
# Test with more than 2 classes
epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks,
event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
baseline=(None, 0), proj=False, preload=True)
epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]
for cov_est in ['concat', 'epoch']:
csp = CSP(n_components=n_components, cov_est=cov_est, norm_trace=False)
csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
assert_equal(len(csp._classes), 4)
assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])
# Test average power transform
n_components = 2
assert (csp.transform_into == 'average_power')
feature_shape = [len(epochs_data), n_components]
X_trans = dict()
for log in (None, True, False):
csp = CSP(n_components=n_components, log=log, norm_trace=False)
assert (csp.log is log)
Xt = csp.fit_transform(epochs_data, epochs.events[:, 2])
assert_array_equal(Xt.shape, feature_shape)
X_trans[str(log)] = Xt
# log=None => log=True
assert_array_almost_equal(X_trans['None'], X_trans['True'])
# Different normalization return different transform
assert (np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.)
# Check wrong inputs
pytest.raises(ValueError, CSP, transform_into='average_power', log='foo')
# Test csp space transform
csp = CSP(transform_into='csp_space', norm_trace=False)
assert (csp.transform_into == 'csp_space')
for log in ('foo', True, False):
pytest.raises(ValueError, CSP, transform_into='csp_space', log=log,
norm_trace=False)
n_components = 2
csp = CSP(n_components=n_components, transform_into='csp_space',
norm_trace=False)
Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]]
assert_array_equal(Xt.shape, feature_shape)
# Check mixing matrix on simulated data
y = np.array([100] * 50 + [1] * 50)
X, A = simulate_data(y)
for cov_est in ['concat', 'epoch']:
# fit csp
csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False)
csp.fit(X, y)
        # check that the first pattern matches the mixing matrix
        # (the sign might change)
corr = np.abs(np.corrcoef(csp.patterns_[0, :].T, A[:, 0])[0, 1])
assert np.abs(corr) > 0.99
# check output
out = csp.transform(X)
corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
assert np.abs(corr) > 0.95
@requires_sklearn
def test_regularized_csp():
"""Test Common Spatial Patterns algorithm using regularized covariance."""
raw = io.read_raw_fif(raw_fname)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
for reg in reg_cov:
csp = CSP(n_components=n_components, reg=reg, norm_trace=False,
rank=None)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert (csp.filters_.shape == (n_channels, n_channels))
assert (csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).
transform(epochs_data), X)
# test init exception
pytest.raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
pytest.raises(ValueError, csp.fit, epochs, y)
pytest.raises(ValueError, csp.transform, epochs)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert (sources.shape[1] == n_components)
@requires_sklearn
def test_csp_pipeline():
"""Test if CSP works in a pipeline."""
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
csp = CSP(reg=1, norm_trace=False)
svc = SVC()
pipe = Pipeline([("CSP", csp), ("SVC", svc)])
pipe.set_params(CSP__reg=0.2)
assert (pipe.get_params()["CSP__reg"] == 0.2)
def test_ajd():
"""Test approximate joint diagonalization."""
    # The implementation should obtain the same results as the Matlab
    # implementation by Pham Dinh-Tuan.
    # Generate a set of covariance matrices for testing purposes
n_times, n_channels = 10, 3
seed = np.random.RandomState(0)
diags = 2.0 + 0.1 * seed.randn(n_times, n_channels)
A = 2 * seed.rand(n_channels, n_channels) - 1
A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T
covmats = np.empty((n_times, n_channels, n_channels))
for i in range(n_times):
covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)
V, D = _ajd_pham(covmats)
# Results obtained with original matlab implementation
V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574],
[0.694689013234610, 0.775690358505945, -1.162043086446043],
[-0.592603135588066, -0.598996925696260, 1.009550086271192]]
assert_array_almost_equal(V, V_matlab)
def test_spoc():
"""Test SPoC."""
X = np.random.randn(10, 10, 20)
y = np.random.randn(10)
spoc = SPoC(n_components=4)
spoc.fit(X, y)
Xt = spoc.transform(X)
assert_array_equal(Xt.shape, [10, 4])
spoc = SPoC(n_components=4, transform_into='csp_space')
spoc.fit(X, y)
Xt = spoc.transform(X)
assert_array_equal(Xt.shape, [10, 4, 20])
assert_array_equal(spoc.filters_.shape, [10, 10])
assert_array_equal(spoc.patterns_.shape, [10, 10])
# check y
pytest.raises(ValueError, spoc.fit, X, y * 0)
    # Check that it doesn't take CSP-specific input
pytest.raises(TypeError, SPoC, cov_est='epoch')
# Check mixing matrix on simulated data
rs = np.random.RandomState(42)
y = rs.rand(100) * 50 + 1
X, A = simulate_data(y)
# fit spoc
spoc = SPoC(n_components=1)
spoc.fit(X, y)
    # check that the first pattern matches the mixing matrix
corr = np.abs(np.corrcoef(spoc.patterns_[0, :].T, A[:, 0])[0, 1])
assert np.abs(corr) > 0.99
# check output
out = spoc.transform(X)
corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
assert np.abs(corr) > 0.85
def test_csp_twoclass_symmetry():
"""Test that CSP is symmetric when swapping classes."""
x, y = deterministic_toy_data(['class_a', 'class_b'])
csp = CSP(norm_trace=False, transform_into='average_power', log=True)
log_power = csp.fit_transform(x, y)
log_power_ratio_ab = log_power[0] - log_power[1]
x, y = deterministic_toy_data(['class_b', 'class_a'])
csp = CSP(norm_trace=False, transform_into='average_power', log=True)
log_power = csp.fit_transform(x, y)
log_power_ratio_ba = log_power[0] - log_power[1]
assert_array_almost_equal(log_power_ratio_ab,
log_power_ratio_ba)
def test_csp_component_ordering():
"""Test that CSP component ordering works as expected."""
x, y = deterministic_toy_data(['class_a', 'class_b'])
pytest.raises(ValueError, CSP, component_order='invalid')
# component_order='alternate' only works with two classes
csp = CSP(component_order='alternate')
with pytest.raises(ValueError):
csp.fit(np.zeros((3, 0, 0)), ['a', 'b', 'c'])
p_alt = CSP(component_order='alternate').fit(x, y).patterns_
p_mut = CSP(component_order='mutual_info').fit(x, y).patterns_
# This permutation of p_alt and p_mut is explained by the particular
# eigenvalues of the toy data: [0.06, 0.1, 0.5, 0.8].
# p_alt arranges them to [0.8, 0.06, 0.5, 0.1]
# p_mut arranges them to [0.06, 0.1, 0.8, 0.5]
assert_array_almost_equal(p_alt, p_mut[[2, 0, 3, 1]])
| bsd-3-clause |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/autokeras/autokeras_image.py | 2 | 1321 | #!/usr/bin/env python
import time
import numpy as np
import tensorflow as tf
import autokeras as ak
# REF [site] >> https://autokeras.com/start/
def mnist_example():
# Loads dataset.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.reshape(x_test.shape + (1,))
#--------------------
clf = ak.ImageClassifier(verbose=True, augment=False)
print('Fitting...')
start_time = time.time()
clf.fit(x_train, y_train, time_limit=12 * 60 * 60) # time_limit in secs.
print('\tElapsed time = {}'.format(time.time() - start_time))
print('Final Fitting...')
start_time = time.time()
clf.final_fit(x_train, y_train, x_test, y_test, retrain=True)
print('\tElapsed time = {}'.format(time.time() - start_time))
print('Evaluating...')
start_time = time.time()
accuracy = clf.evaluate(x_test, y_test)
print('\tElapsed time = {}'.format(time.time() - start_time))
print('Accuracy =', accuracy * 100)
print('Predicting...')
start_time = time.time()
predictions = clf.predict(x_test)
print('\tElapsed time = {}'.format(time.time() - start_time))
print('Predictions =', predictions)
def main():
mnist_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
pravsripad/mne-python | mne/forward/tests/test_make_forward.py | 2 | 25671 | from itertools import product
import os
import os.path as op
from pathlib import Path
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_array_equal
from mne.channels import make_standard_montage
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_kit, read_raw_bti, read_info
from mne.io.constants import FIFF
from mne import (read_forward_solution, write_forward_solution,
make_forward_solution, convert_forward_solution,
setup_volume_source_space, read_source_spaces, create_info,
make_sphere_model, pick_types_forward, pick_info, pick_types,
read_evokeds, read_cov, read_dipole,
get_volume_labels_from_aseg)
from mne.surface import _get_ico_surface
from mne.transforms import Transform
from mne.utils import (requires_mne, requires_nibabel, run_subprocess,
catch_logging)
from mne.forward._make_forward import _create_meg_coils, make_forward_dipole
from mne.forward._compute_forward import _magnetic_dipole_field_vec
from mne.forward import Forward, _do_forward_solution, use_coil_def
from mne.dipole import Dipole, fit_dipole
from mne.simulation import simulate_evoked
from mne.source_estimate import VolSourceEstimate
from mne.source_space import (write_source_spaces, _compare_source_spaces,
setup_source_space)
from mne.forward.tests.test_forward import assert_forward_allclose
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif')
def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
meg_rtol=1e-4, meg_atol=1e-9,
eeg_rtol=1e-3, eeg_atol=1e-3):
"""Test forwards."""
# check source spaces
assert_equal(len(fwd['src']), len(fwd_py['src']))
_compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx')
for surf_ori, force_fixed in product([False, True], [False, True]):
# use copy here to leave our originals unmodified
fwd = convert_forward_solution(fwd, surf_ori, force_fixed, copy=True,
use_cps=True)
fwd_py = convert_forward_solution(fwd_py, surf_ori, force_fixed,
copy=True, use_cps=True)
check_src = n_src // 3 if force_fixed else n_src
for key in ('nchan', 'source_rr', 'source_ori',
'surf_ori', 'coord_frame', 'nsource'):
assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7,
err_msg=key)
# In surf_ori=True only Z matters for source_nn
if surf_ori and not force_fixed:
ori_sl = slice(2, None, 3)
else:
ori_sl = slice(None)
assert_allclose(fwd_py['source_nn'][ori_sl], fwd['source_nn'][ori_sl],
rtol=1e-4, atol=1e-6)
assert_allclose(fwd_py['mri_head_t']['trans'],
fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
assert_equal(fwd_py['sol']['data'].shape, (n_sensors, check_src))
assert_equal(len(fwd['sol']['row_names']), n_sensors)
assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
# check MEG
assert_allclose(fwd['sol']['data'][:306, ori_sl],
fwd_py['sol']['data'][:306, ori_sl],
rtol=meg_rtol, atol=meg_atol,
err_msg='MEG mismatch')
# check EEG
if fwd['sol']['data'].shape[0] > 306:
assert_allclose(fwd['sol']['data'][306:, ori_sl],
fwd_py['sol']['data'][306:, ori_sl],
rtol=eeg_rtol, atol=eeg_atol,
err_msg='EEG mismatch')
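# A minimal end-to-end sketch of building a forward model (a sketch only; it
# reuses the testing-data paths defined above, so it needs the MNE testing
# dataset, and mirrors calls exercised in the tests below).
def _demo_make_forward():
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem_meg,
                                meg=True, eeg=False)
    return convert_forward_solution(fwd, surf_ori=True)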
def test_magnetic_dipole():
"""Test basic magnetic dipole forward calculation."""
info = read_info(fname_raw)
picks = pick_types(info, meg=True, eeg=False, exclude=[])
info = pick_info(info, picks[:12])
coils = _create_meg_coils(info['chs'], 'normal', None)
# magnetic dipole far (meters!) from device origin
r0 = np.array([0., 13., -6.])
for ch, coil in zip(info['chs'], coils):
rr = (ch['loc'][:3] + r0) / 2. # get halfway closer
far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
ratio = 8. if ch['ch_name'][-1] == '1' else 16. # grad vs mag
assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
# degenerate case
r0 = coils[0]['rmag'][[0]]
with pytest.raises(RuntimeError, match='Coil too close'):
_magnetic_dipole_field_vec(r0, coils[:1])
with pytest.warns(RuntimeWarning, match='Coil too close'):
fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='warning')
assert not np.isfinite(fwd).any()
with np.errstate(invalid='ignore'):
fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='info')
assert not np.isfinite(fwd).any()
@pytest.mark.slowtest # slow-ish on Travis OSX
@pytest.mark.timeout(60) # can take longer than 30 sec on Travis
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_kit(tmp_path):
"""Test making fwd using KIT, BTI, and CTF (compensated) files."""
kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
trans_path = op.join(kit_dir, 'trans-sample.fif')
fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_ctf_comp_raw.fif')
# first set up a small testing source space
fname_src_small = tmp_path / 'sample-oct-2-src.fif'
src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
add_dist=False)
write_source_spaces(fname_src_small, src) # to enable working with MNE-C
n_src = 108 # this is the resulting # of verts in fwd
# first use mne-C: convert file, make forward solution
fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
assert (isinstance(fwd, Forward))
# now let's use python with the same raw file
fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
fname_bem_meg, eeg=False, meg=True)
_compare_forwards(fwd, fwd_py, 157, n_src)
assert (isinstance(fwd_py, Forward))
# now let's use mne-python all the way
raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
# without ignore_ref=True, this should throw an error:
with pytest.raises(NotImplementedError, match='Cannot.*KIT reference'):
make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
# check that asking for eeg channels (even if they don't exist) is handled
meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
eeg=False))
fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
bem=fname_bem_meg, trans=trans_path,
ignore_ref=True)
_compare_forwards(fwd, fwd_py, 157, n_src,
meg_rtol=1e-3, meg_atol=1e-7)
# BTI python end-to-end versus C
fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
_compare_forwards(fwd, fwd_py, 248, n_src)
# now let's test CTF w/compensation
fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
fwd = _do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True, subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
# CTF with compensation changed in python
ctf_raw = read_raw_fif(fname_ctf_raw)
ctf_raw.info['bads'] = ['MRO24-2908'] # test that it works with some bads
ctf_raw.apply_gradient_compensation(2)
fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
fwd = _do_forward_solution('sample', ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True,
subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
fname_temp = tmp_path / 'test-ctf-fwd.fif'
write_forward_solution(fname_temp, fwd_py)
fwd_py2 = read_forward_solution(fname_temp)
_compare_forwards(fwd_py, fwd_py2, 274, n_src)
repr(fwd_py)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_forward_solution():
"""Test making M-EEG forward solution from python."""
with catch_logging() as log:
# make sure everything can be path-like (gh #10872)
fwd_py = make_forward_solution(
Path(fname_raw), Path(fname_trans), Path(fname_src),
Path(fname_bem), mindist=5., verbose=True)
log = log.getvalue()
assert 'Total 258/258 points inside the surface' in log
assert (isinstance(fwd_py, Forward))
fwd = read_forward_solution(fname_meeg)
assert (isinstance(fwd, Forward))
_compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
# Homogeneous model
with pytest.raises(RuntimeError, match='homogeneous.*1-layer.*EEG'):
make_forward_solution(fname_raw, fname_trans, fname_src,
fname_bem_meg)
@testing.requires_testing_data
def test_make_forward_solution_discrete(tmp_path):
"""Test making and converting a forward solution with discrete src."""
# smoke test for depth weighting and discrete source spaces
src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
add_dist=False)
src = src + setup_volume_source_space(
pos=dict(rr=src[0]['rr'][src[0]['vertno'][:3]].copy(),
nn=src[0]['nn'][src[0]['vertno'][:3]].copy()))
sphere = make_sphere_model()
fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=False)
convert_forward_solution(fwd, surf_ori=True)
@testing.requires_testing_data
@requires_mne
@pytest.mark.timeout(90) # can take longer than 60 sec on Travis
def test_make_forward_solution_sphere(tmp_path):
"""Test making a forward solution with a sphere model."""
fname_src_small = tmp_path / 'sample-oct-2-src.fif'
src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
add_dist=False)
write_source_spaces(fname_src_small, src) # to enable working with MNE-C
out_name = tmp_path / 'tmp-fwd.fif'
run_subprocess(['mne_forward_solution', '--meg', '--eeg',
'--meas', fname_raw, '--src', fname_src_small,
'--mri', fname_trans, '--fwd', out_name])
fwd = read_forward_solution(out_name)
sphere = make_sphere_model(verbose=True)
fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=True, verbose=True)
_compare_forwards(fwd, fwd_py, 366, 108,
meg_rtol=5e-1, meg_atol=1e-6,
eeg_rtol=5e-1, eeg_atol=5e-1)
# Since the above is pretty lax, let's check a different way
for meg, eeg in zip([True, False], [False, True]):
fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
fwd_py_['sol']['data'].ravel())[0, 1],
1.0, rtol=1e-3)
# Number of layers in the sphere model doesn't matter for MEG
# (as long as no sources are omitted due to distance)
assert len(sphere['layers']) == 4
fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=False)
sphere_1 = make_sphere_model(head_radius=None)
assert len(sphere_1['layers']) == 0
assert_array_equal(sphere['r0'], sphere_1['r0'])
    fwd_1 = make_forward_solution(fname_raw, fname_trans, src, sphere_1,
                                  meg=True, eeg=False)
_compare_forwards(fwd, fwd_1, 306, 108, meg_rtol=1e-12, meg_atol=1e-12)
# Homogeneous model
sphere = make_sphere_model(head_radius=None)
with pytest.raises(RuntimeError, match='zero shells.*EEG'):
make_forward_solution(fname_raw, fname_trans, src, sphere)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_forward_mixed_source_space(tmp_path):
"""Test making the forward solution for a mixed source space."""
# get the surface source space
rng = np.random.RandomState(0)
surf = read_source_spaces(fname_src)
# setup two volume source spaces
label_names = get_volume_labels_from_aseg(fname_aseg)
vol_labels = rng.choice(label_names, 2)
with pytest.warns(RuntimeWarning, match='Found no usable.*CC_Mid_Ant.*'):
vol1 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,
volume_label=vol_labels[0],
add_interpolator=False)
vol2 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,
volume_label=vol_labels[1],
add_interpolator=False)
# merge surfaces and volume
src = surf + vol1 + vol2
# calculate forward solution
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem)
assert (repr(fwd))
# extract source spaces
src_from_fwd = fwd['src']
# get the coordinate frame of each source space
coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
# assert that all source spaces are in head coordinates
assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
# run tests for SourceSpaces.export_volume
fname_img = tmp_path / 'temp-image.mgz'
# head coordinates and mri_resolution, but trans file
with pytest.raises(ValueError, match='trans containing mri to head'):
src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=None)
# head coordinates and mri_resolution, but wrong trans file
vox_mri_t = vol1[0]['vox_mri_t']
with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'):
src_from_fwd.export_volume(fname_img, mri_resolution=True,
trans=vox_mri_t)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_forward_dipole(tmp_path):
"""Test forward-projecting dipoles."""
rng = np.random.RandomState(0)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
cov['projs'] = [] # avoid proj warning
dip_c = read_dipole(fname_dip)
# Only use magnetometers for speed!
picks = pick_types(evoked.info, meg='mag', eeg=False)[::8]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
info = evoked.info
# Make new Dipole object with n_test_dipoles picked from the dipoles
# in the test dataset.
n_test_dipoles = 3 # minimum 3 needed to get uneven sampling in time
dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles])
dip_test = Dipole(times=dip_c.times[dipsel],
pos=dip_c.pos[dipsel],
amplitude=dip_c.amplitude[dipsel],
ori=dip_c.ori[dipsel],
gof=dip_c.gof[dipsel])
sphere = make_sphere_model(head_radius=0.1)
# Warning emitted due to uneven sampling in time
with pytest.warns(RuntimeWarning, match='unevenly spaced'):
fwd, stc = make_forward_dipole(dip_test, sphere, info,
trans=fname_trans)
# stc is list of VolSourceEstimate's
assert isinstance(stc, list)
for n_dip in range(n_test_dipoles):
assert isinstance(stc[n_dip], VolSourceEstimate)
# Now simulate evoked responses for each of the test dipoles,
# and fit dipoles to them (sphere model, MEG and EEG)
times, pos, amplitude, ori, gof = [], [], [], [], []
nave = 200 # add a tiny amount of noise to the simulated evokeds
for s in stc:
evo_test = simulate_evoked(fwd, s, info, cov,
nave=nave, random_state=rng)
# evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info))
dfit, resid = fit_dipole(evo_test, cov, sphere, None)
times += dfit.times.tolist()
pos += dfit.pos.tolist()
amplitude += dfit.amplitude.tolist()
ori += dfit.ori.tolist()
gof += dfit.gof.tolist()
# Create a new Dipole object with the dipole fits
dip_fit = Dipole(times, pos, amplitude, ori, gof)
# check that true (test) dipoles and fits are "close"
# cf. mne/tests/test_dipole.py
diff = dip_test.pos - dip_fit.pos
corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1]
dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1)))
gc_dist = 180 / np.pi * \
np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1)))
amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2))
# Make sure each coordinate is close to reference
# NB tolerance should be set relative to snr of simulated evoked!
assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2,
err_msg='position mismatch')
assert dist < 1e-2 # within 1 cm
assert corr > 0.985
assert gc_dist < 20 # less than 20 degrees
assert amp_err < 10e-9 # within 10 nAm
# Make sure rejection works with BEM: one dipole at z=1m
# NB _make_forward.py:_prepare_for_forward will raise a RuntimeError
# if no points are left after min_dist exclusions, hence 2 dips here!
dip_outside = Dipole(times=[0., 0.001],
pos=[[0., 0., 1.0], [0., 0., 0.040]],
amplitude=[100e-9, 100e-9],
ori=[[1., 0., 0.], [1., 0., 0.]], gof=1)
with pytest.raises(ValueError, match='outside the inner skull'):
make_forward_dipole(dip_outside, fname_bem, info, fname_trans)
# if we get this far, can safely assume the code works with BEMs too
# -> use sphere again below for speed
# Now make an evenly sampled set of dipoles, some simultaneous,
# should return a VolSourceEstimate regardless
times = [0., 0., 0., 0.001, 0.001, 0.002]
    pos = rng.rand(6, 3) * 0.020 + \
        np.array([0., 0., 0.040])[np.newaxis, :]
    amplitude = rng.rand(6) * 100e-9
ori = np.eye(6, 3) + np.eye(6, 3, -3)
gof = np.arange(len(times)) / len(times) # arbitrary
dip_even_samp = Dipole(times, pos, amplitude, ori, gof)
# I/O round-trip
fname = str(tmp_path / 'test-fwd.fif')
with pytest.warns(RuntimeWarning, match='free orientation'):
write_forward_solution(fname, fwd)
fwd_read = convert_forward_solution(
read_forward_solution(fname), force_fixed=True)
assert_forward_allclose(fwd, fwd_read, rtol=1e-6)
fwd, stc = make_forward_dipole(dip_even_samp, sphere, info,
trans=fname_trans)
assert isinstance(stc, VolSourceEstimate)
assert_allclose(stc.times, np.arange(0., 0.003, 0.001))
# Test passing a list of Dipoles instead of a single Dipole object
fwd2, stc2 = make_forward_dipole([dip_even_samp[0], dip_even_samp[1:]],
sphere, info, trans=fname_trans)
assert_array_equal(fwd['sol']['data'], fwd2['sol']['data'])
assert_array_equal(stc.data, stc2.data)
@testing.requires_testing_data
def test_make_forward_no_meg(tmp_path):
"""Test that we can make and I/O forward solution with no MEG channels."""
pos = dict(rr=[[0.05, 0, 0]], nn=[[0, 0, 1.]])
src = setup_volume_source_space(pos=pos)
bem = make_sphere_model()
trans = None
montage = make_standard_montage('standard_1020')
info = create_info(['Cz'], 1000., 'eeg').set_montage(montage)
fwd = make_forward_solution(info, trans, src, bem)
fname = tmp_path / 'test-fwd.fif'
write_forward_solution(fname, fwd)
fwd_read = read_forward_solution(fname)
assert_allclose(fwd['sol']['data'], fwd_read['sol']['data'])
def test_use_coil_def(tmp_path):
"""Test use_coil_def."""
info = create_info(1, 1000., 'mag')
info['chs'][0]['coil_type'] = 9999
info['chs'][0]['loc'][:] = [0, 0, 0.02, 1, 0, 0, 0, 1, 0, 0, 0, 1]
sphere = make_sphere_model((0., 0., 0.), 0.01)
src = setup_volume_source_space(pos=5, sphere=sphere)
trans = Transform('head', 'mri', None)
with pytest.raises(RuntimeError, match='coil definition not found'):
make_forward_solution(info, trans, src, sphere)
coil_fname = tmp_path / 'coil_def.dat'
with open(coil_fname, 'w') as fid:
fid.write("""# custom cube coil def
1 9999 2 8 3e-03 0.000e+00 "Test"
0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000""")
with pytest.raises(RuntimeError, match='Could not interpret'):
with use_coil_def(coil_fname):
make_forward_solution(info, trans, src, sphere)
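    # The definition written below is assumed to follow the coil_def.dat
    # layout: a header line <class> <id> <accuracy> <n_points> <size>
    # <baseline> "<description>", then one line per integration point with
    # its weight, x/y/z position (in metres) and normal.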
with open(coil_fname, 'w') as fid:
fid.write("""# custom cube coil def
1 9999 2 8 3e-03 0.000e+00 "Test"
0.1250 -0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 -0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 -0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 -0.750e-03 0.750e-03 0.000 0.000 1.000
0.1250 0.750e-03 0.750e-03 0.750e-03 0.000 0.000 1.000""")
with use_coil_def(coil_fname):
make_forward_solution(info, trans, src, sphere)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_sensors_inside_bem():
"""Test that sensors inside the BEM are problematic."""
rr = _get_ico_surface(1)['rr']
rr /= np.linalg.norm(rr, axis=1, keepdims=True)
rr *= 0.1
assert len(rr) == 42
info = create_info(len(rr), 1000., 'mag')
info['dev_head_t'] = Transform('meg', 'head', np.eye(4))
for ii, ch in enumerate(info['chs']):
ch['loc'][:] = np.concatenate((rr[ii], np.eye(3).ravel()))
trans = Transform('head', 'mri', np.eye(4))
trans['trans'][2, 3] = 0.03
sphere_noshell = make_sphere_model((0., 0., 0.), None)
sphere = make_sphere_model((0., 0., 0.), 1.01)
with pytest.raises(RuntimeError, match='.* 15 MEG.*inside the scalp.*'):
make_forward_solution(info, trans, fname_src, fname_bem)
make_forward_solution(info, trans, fname_src, fname_bem_meg) # okay
make_forward_solution(info, trans, fname_src, sphere_noshell) # okay
with pytest.raises(RuntimeError, match='.* 42 MEG.*outermost sphere sh.*'):
make_forward_solution(info, trans, fname_src, sphere)
sphere = make_sphere_model((0., 0., 2.0), 1.01) # weird, but okay
make_forward_solution(info, trans, fname_src, sphere)
for ch in info['chs']:
ch['loc'][:3] *= 0.1
with pytest.raises(RuntimeError, match='.* 42 MEG.*the inner skull.*'):
make_forward_solution(info, trans, fname_src, fname_bem_meg)
| bsd-3-clause |
AtsushiSakai/jsk_visualization_packages | jsk_recognition_utils/python/jsk_recognition_utils/feature.py | 1 | 1203 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.neighbors import NearestNeighbors
class BagOfFeatures(object):
def __init__(self, hist_size=500):
self.nn = None
self.hist_size = hist_size
def fit(self, X):
"""Fit features and extract bag of features"""
k = self.hist_size
km = MiniBatchKMeans(n_clusters=k, init_size=3*k, max_iter=300)
km.fit(X)
nn = NearestNeighbors(n_neighbors=1)
nn.fit(km.cluster_centers_)
self.nn = nn
def transform(self, X):
return np.vstack([self.make_hist(xi.reshape((-1, 128))) for xi in X])
def make_hist(self, descriptors):
"""Make histogram for one image"""
nn = self.nn
if nn is None:
raise ValueError('must fit features before making histogram')
indices = nn.kneighbors(descriptors, return_distance=False)
histogram = np.zeros(self.hist_size)
for idx in np.unique(indices):
mask = indices == idx
histogram[idx] = mask.sum() # count the idx
            indices = indices[~mask]
return histogram
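# A minimal usage sketch (a sketch only; it assumes 128-dimensional,
# SIFT-like descriptors, matching the reshape in transform above).
def _demo_bag_of_features():
    rng = np.random.RandomState(0)
    train_descriptors = rng.rand(1000, 128)  # descriptors pooled over images
    images = [rng.rand(50 * 128) for _ in range(3)]  # flattened per-image descriptors
    bof = BagOfFeatures(hist_size=10)
    bof.fit(train_descriptors)
    return bof.transform(images)  # histogram matrix of shape (3, 10)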
| mit |
schets/scikit-learn | sklearn/tests/test_learning_curve.py | 224 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
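# A minimal usage sketch of validation_curve with a real estimator (a sketch,
# not part of the test suite; it reuses the imports above and varies the
# estimator's ``C`` regularization parameter).
def _demo_validation_curve():
    X, y = make_classification(n_samples=60, n_features=5, random_state=0)
    param_range = np.logspace(-2, 1, 4)
    train_scores, test_scores = validation_curve(
        PassiveAggressiveClassifier(n_iter=1, shuffle=False), X, y,
        param_name="C", param_range=param_range, cv=3)
    return train_scores.mean(axis=1), test_scores.mean(axis=1)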
| bsd-3-clause |
pravsripad/mne-python | mne/decoding/search_light.py | 7 | 25797 | # Author: Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator, _check_estimator
from ..fixes import _get_check_scoring
from ..parallel import parallel_func
from ..utils import (array_split_idx, ProgressBar,
verbose, fill_doc)
@fill_doc
class SlidingEstimator(BaseEstimator, TransformerMixin):
"""Search Light.
Fit, predict and score a series of models to each subset of the dataset
along the last dimension. Each entry in the last dimension is referred
to as a task.
Parameters
----------
%(base_estimator)s
%(scoring)s
%(n_jobs)s
%(verbose)s
Attributes
----------
estimators_ : array-like, shape (n_tasks,)
List of fitted scikit-learn estimators (one per task).
"""
@verbose
def __init__(self, base_estimator, scoring=None, n_jobs=None, *,
verbose=None): # noqa: D102
_check_estimator(base_estimator)
self._estimator_type = getattr(base_estimator, "_estimator_type", None)
self.base_estimator = base_estimator
self.n_jobs = n_jobs
self.scoring = scoring
def __repr__(self): # noqa: D105
repr_str = '<' + super(SlidingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators' % len(self.estimators_)
return repr_str + '>'
def fit(self, X, y, **fit_params):
"""Fit a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each data slice, a clone estimator
is fitted independently. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
**fit_params : dict of string -> object
Parameters to pass to the fit method of the estimator.
Returns
-------
self : object
Return self.
"""
self._check_Xy(X, y)
parallel, p_func, n_jobs = parallel_func(
_sl_fit, self.n_jobs, max_jobs=X.shape[-1], verbose=False)
self.estimators_ = list()
self.fit_params = fit_params
# For fitting, the parallelization is across estimators.
mesg = 'Fitting %s' % (self.__class__.__name__,)
with ProgressBar(X.shape[-1], mesg=mesg) as pb:
estimators = parallel(
p_func(self.base_estimator, split, y, pb.subset(pb_idx),
**fit_params)
for pb_idx, split in array_split_idx(X, n_jobs, axis=-1))
# Each parallel job can have a different number of training estimators
# We can't directly concatenate them because of sklearn's Bagging API
# (see scikit-learn #9720)
self.estimators_ = np.empty(X.shape[-1], dtype=object)
idx = 0
for job_estimators in estimators:
for est in job_estimators:
self.estimators_[idx] = est
idx += 1
return self
def fit_transform(self, X, y, **fit_params):
"""Fit and transform a series of independent estimators to the dataset.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The training input samples. For each task, a clone estimator
is fitted independently. The feature dimension can be
multidimensional, e.g.::
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
**fit_params : dict of string -> object
Parameters to pass to the fit method of the estimator.
Returns
-------
y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self.fit(X, y, **fit_params).transform(X)
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(
_sl_transform, self.n_jobs, max_jobs=X.shape[-1], verbose=False)
mesg = 'Transforming %s' % (self.__class__.__name__,)
X_splits = np.array_split(X, n_jobs, axis=-1)
idx, est_splits = zip(*array_split_idx(self.estimators_, n_jobs))
with ProgressBar(X.shape[-1], mesg=mesg) as pb:
y_pred = parallel(p_func(est, x, method, pb.subset(pb_idx))
for pb_idx, est, x in zip(
idx, est_splits, X_splits))
y_pred = np.concatenate(y_pred, axis=1)
return y_pred
def transform(self, X):
"""Transform each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice/task, the corresponding
estimator makes a transformation of the data, e.g.
``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
Returns
-------
Xt : array, shape (n_samples, n_estimators)
The transformed values generated by each estimator.
""" # noqa: E501
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice/task with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample predictions, e.g.:
``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
Returns
-------
y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets)
Predicted values for each estimator/data slice.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Predict each data slice with a series of independent estimators.
The number of tasks in X should match the number of tasks/estimators
given at fit time.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
makes the sample probabilistic predictions, e.g.:
``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
Returns
-------
y_pred : array, shape (n_samples, n_tasks, n_classes)
Predicted probabilities for each estimator/data slice/task.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to the hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
outputs the distance to the hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
Predicted distances for each estimator/data slice.
Notes
-----
This requires base_estimator to have a ``decision_function`` method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def score(self, X, y):
"""Score each estimator on each task.
The number of tasks in X should match the number of tasks/estimators
given at fit time, i.e. we need
``X.shape[-1] == len(self.estimators_)``.
Parameters
----------
X : array, shape (n_samples, nd_features, n_tasks)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks).
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_estimators,)
Score for each estimator/task.
""" # noqa: E501
check_scoring = _get_check_scoring()
self._check_Xy(X)
if X.shape[-1] != len(self.estimators_):
raise ValueError('The number of estimators does not match '
'X.shape[-1]')
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
parallel, p_func, n_jobs = parallel_func(
_sl_score, self.n_jobs, max_jobs=X.shape[-1])
X_splits = np.array_split(X, n_jobs, axis=-1)
est_splits = np.array_split(self.estimators_, n_jobs)
score = parallel(p_func(est, scoring, x, y)
for (est, x) in zip(est_splits, X_splits))
score = np.concatenate(score, axis=0)
return score
@property
def classes_(self):
if not hasattr(self.estimators_[0], 'classes_'):
raise AttributeError('classes_ attribute available only if '
'base_estimator has it, and estimator %s does'
' not' % (self.estimators_[0],))
return self.estimators_[0].classes_
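# A minimal usage sketch of SlidingEstimator (a sketch, not part of the public
# API of this module; it assumes scikit-learn's LogisticRegression as the base
# estimator and treats each time point of X as one task).
def _demo_sliding_estimator():
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.randn(40, 5, 10)   # 40 epochs, 5 channels, 10 time points (tasks)
    y = rng.randint(0, 2, 40)  # binary labels
    sl = SlidingEstimator(LogisticRegression(), scoring='accuracy')
    sl.fit(X, y)
    return sl.score(X, y)      # expected: one score per time point, shape (10,)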
@fill_doc
def _sl_fit(estimator, X, y, pb, **fit_params):
"""Aux. function to fit SlidingEstimator in parallel.
Fit a clone estimator to each slice of data.
Parameters
----------
%(base_estimator)s
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_sample, )
The target values.
fit_params : dict | None
Parameters to pass to the fit method of the estimator.
Returns
-------
estimators_ : list of estimators
The fitted estimators.
"""
from sklearn.base import clone
estimators_ = list()
for ii in range(X.shape[-1]):
est = clone(estimator)
est.fit(X[..., ii], y, **fit_params)
estimators_.append(est)
pb.update(ii + 1)
return estimators_
def _sl_transform(estimators, X, method, pb):
"""Aux. function to transform SlidingEstimator in parallel.
Applies transform/predict/decision_function etc for each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
X : array, shape (n_samples, nd_features, n_estimators)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
method : str
The estimator method to use (e.g. 'predict', 'transform').
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2)
The transformations for each slice of data.
""" # noqa: E501
for ii, est in enumerate(estimators):
transform = getattr(est, method)
_y_pred = transform(X[..., ii])
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _sl_init_pred(_y_pred, X)
y_pred[:, ii, ...] = _y_pred
pb.update(ii + 1)
return y_pred
def _sl_init_pred(y_pred, X):
"""Aux. function to SlidingEstimator to initialize y_pred."""
n_sample, n_tasks = X.shape[0], X.shape[-1]
y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype)
return y_pred
def _sl_score(estimators, scoring, X, y):
"""Aux. function to score SlidingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list, shape (n_tasks,)
The fitted estimators.
X : array, shape (n_samples, nd_features, n_tasks)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
scoring : callable, str or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
        predictions to pass them to an ad-hoc scorer.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_tasks,)
The score for each task / slice of data.
"""
n_tasks = X.shape[-1]
score = np.zeros(n_tasks)
for ii, est in enumerate(estimators):
score[ii] = scoring(est, X[..., ii], y)
return score
def _check_method(estimator, method):
"""Check that an estimator has the method attribute.
If method == 'transform' and estimator does not have 'transform', use
'predict' instead.
"""
if method == 'transform' and not hasattr(estimator, 'transform'):
method = 'predict'
if not hasattr(estimator, method):
        raise ValueError('base_estimator does not have `%s` method.' % method)
return method
@fill_doc
class GeneralizingEstimator(SlidingEstimator):
"""Generalization Light.
    Fit a search-light along the last dimension and use the fitted estimators to
    apply a systematic cross-task generalization.
Parameters
----------
%(base_estimator)s
%(scoring)s
%(n_jobs)s
%(verbose)s
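    Examples
    --------
    A minimal usage sketch (the data, shapes, and estimator choice below are
    illustrative assumptions, not taken from the upstream documentation)::

        import numpy as np
        from sklearn.linear_model import LogisticRegression

        X = np.random.randn(50, 10, 15)  # (n_samples, n_features, n_times)
        y = np.random.randint(0, 2, 50)
        gen = GeneralizingEstimator(LogisticRegression(), scoring='accuracy')
        gen.fit(X, y)
        scores = gen.score(X, y)  # shape (n_times, n_times)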
"""
def __repr__(self): # noqa: D105
repr_str = super(GeneralizingEstimator, self).__repr__()
if hasattr(self, 'estimators_'):
repr_str = repr_str[:-1]
repr_str += ', fitted with %i estimators>' % len(self.estimators_)
return repr_str
def _transform(self, X, method):
"""Aux. function to make parallel predictions/transformation."""
self._check_Xy(X)
method = _check_method(self.base_estimator, method)
mesg = 'Transforming %s' % (self.__class__.__name__,)
parallel, p_func, n_jobs = parallel_func(
_gl_transform, self.n_jobs, max_jobs=X.shape[-1], verbose=False)
with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb:
y_pred = parallel(
p_func(self.estimators_, x_split, method, pb.subset(pb_idx))
for pb_idx, x_split in array_split_idx(
X, n_jobs, axis=-1, n_per_split=len(self.estimators_)))
y_pred = np.concatenate(y_pred, axis=2)
return y_pred
def transform(self, X):
"""Transform each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
            The input samples. Each fitted estimator is applied to every data
            slice to produce a transformation. The feature dimension can be
multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
Returns
-------
Xt : array, shape (n_samples, n_estimators, n_slices)
The transformed values generated by each estimator.
"""
return self._transform(X, 'transform')
def predict(self, X):
"""Predict each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
            The input samples. Each fitted estimator predicts every data slice
            independently. The feature
dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets)
The predicted values for each estimator.
""" # noqa: E501
return self._transform(X, 'predict')
def predict_proba(self, X):
"""Estimate probabilistic estimates of each data slice with all possible estimators.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
            The input samples. Each fitted estimator outputs class probability
            estimates for every data slice. The feature dimension can be
multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes)
The predicted values for each estimator.
Notes
-----
This requires ``base_estimator`` to have a ``predict_proba`` method.
""" # noqa: E501
return self._transform(X, 'predict_proba')
def decision_function(self, X):
"""Estimate distances of each data slice to all hyperplanes.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The training input samples. Each estimator outputs the distance to
its hyperplane, e.g.:
``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
Returns
-------
y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2)
The predicted values for each estimator.
Notes
-----
This requires ``base_estimator`` to have a ``decision_function``
method.
""" # noqa: E501
return self._transform(X, 'decision_function')
def score(self, X, y):
"""Score each of the estimators on the tested dimensions.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
The input samples. For each data slice, the corresponding estimator
scores the prediction, e.g.:
``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``.
The feature dimension can be multidimensional e.g.
``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``.
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
        score : array, shape (n_estimators, n_slices)
Score for each estimator / data slice couple.
""" # noqa: E501
check_scoring = _get_check_scoring()
self._check_Xy(X)
# For predictions/transforms the parallelization is across the data and
# not across the estimators to avoid memory load.
mesg = 'Scoring %s' % (self.__class__.__name__,)
parallel, p_func, n_jobs = parallel_func(
_gl_score, self.n_jobs, max_jobs=X.shape[-1], verbose=False)
scoring = check_scoring(self.base_estimator, self.scoring)
y = _fix_auc(scoring, y)
with ProgressBar(X.shape[-1] * len(self.estimators_), mesg=mesg) as pb:
score = parallel(p_func(self.estimators_, scoring, x, y,
pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
X, n_jobs, axis=-1,
n_per_split=len(self.estimators_)))
score = np.concatenate(score, axis=1)
return score
def _gl_transform(estimators, X, method, pb):
"""Transform the dataset.
This will apply each estimator to all slices of the data.
Parameters
----------
X : array, shape (n_samples, nd_features, n_slices)
        The input samples. Each fitted estimator is applied to every data
        slice. The feature dimension can be multidimensional
e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
Returns
-------
    Xt : array, shape (n_samples, n_estimators, n_slices)
The transformed values generated by each estimator.
"""
n_sample, n_iter = X.shape[0], X.shape[-1]
for ii, est in enumerate(estimators):
# stack generalized data for faster prediction
X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)])
X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]])
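        # X_stack now has shape (n_samples * n_slices, <feature dims>), so a
        # single call to the estimator method covers every slice at once.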
transform = getattr(est, method)
_y_pred = transform(X_stack)
# unstack generalizations
if _y_pred.ndim == 2:
_y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]])
else:
shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int)
_y_pred = np.reshape(_y_pred, shape)
# Initialize array of predictions on the first transform iteration
if ii == 0:
y_pred = _gl_init_pred(_y_pred, X, len(estimators))
y_pred[:, ii, ...] = _y_pred
pb.update((ii + 1) * n_iter)
return y_pred
def _gl_init_pred(y_pred, X, n_train):
"""Aux. function to GeneralizingEstimator to initialize y_pred."""
n_sample, n_iter = X.shape[0], X.shape[-1]
if y_pred.ndim == 3:
y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]),
y_pred.dtype)
else:
y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype)
return y_pred
def _gl_score(estimators, scoring, X, y, pb):
"""Score GeneralizingEstimator in parallel.
Predict and score each slice of data.
Parameters
----------
estimators : list of estimators
The fitted estimators.
scoring : callable, string or None
If scoring is None (default), the predictions are internally
generated by estimator.score(). Else, we must first get the
        predictions to pass them to an ad-hoc scorer.
X : array, shape (n_samples, nd_features, n_slices)
The target data. The feature dimension can be multidimensional e.g.
X.shape = (n_samples, n_features_1, n_features_2, n_estimators)
y : array, shape (n_samples,) | (n_samples, n_targets)
The target values.
Returns
-------
score : array, shape (n_estimators, n_slices)
The score for each slice of data.
"""
    # FIXME: The level of parallelization may be a bit high and memory
    # consuming. It may be preferable to parallelize the loop across X slices.
score_shape = [len(estimators), X.shape[-1]]
for jj in range(X.shape[-1]):
for ii, est in enumerate(estimators):
_score = scoring(est, X[..., jj], y)
# Initialize array of predictions on the first score iteration
if (ii == 0) and (jj == 0):
dtype = type(_score)
score = np.zeros(score_shape, dtype)
score[ii, jj, ...] = _score
pb.update(jj * len(estimators) + ii + 1)
return score
def _fix_auc(scoring, y):
from sklearn.preprocessing import LabelEncoder
# This fixes sklearn's inability to compute roc_auc when y not in [0, 1]
# scikit-learn/scikit-learn#6874
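    # e.g. labels like [1, 2, 1, 2] are re-encoded to [0, 1, 0, 1] before scoring.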
if scoring is not None:
score_func = getattr(scoring, '_score_func', None)
kwargs = getattr(scoring, '_kwargs', {})
if (getattr(score_func, '__name__', '') == 'roc_auc_score' and
kwargs.get('multi_class', 'raise') == 'raise'):
if np.ndim(y) != 1 or len(set(y)) != 2:
raise ValueError('roc_auc scoring can only be computed for '
'two-class problems.')
y = LabelEncoder().fit_transform(y)
return y
| bsd-3-clause |
keras-team/keras-io | examples/vision/consistency_training.py | 1 | 13728 | """
Title: Consistency training with supervision
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/04/13
Last modified: 2021/04/19
Description: Training with consistency regularization for robustness against data distribution shifts.
"""
"""
Deep learning models excel in many image recognition tasks when the data is independent
and identically distributed (i.i.d.). However, they can suffer from performance
degradation caused by subtle distribution shifts in the input data (such as random
noise, contrast change, and blurring). So, naturally, the question arises: why does
this happen? As discussed in [A Fourier Perspective on Model Robustness in Computer Vision](https://arxiv.org/pdf/1906.08988.pdf),
there's no reason for deep learning models to be robust against such shifts. Standard
model training procedures (such as standard image classification training workflows)
*don't* enable a model to learn beyond what's fed to it in the form of training data.
In this example, we will be training an image classification model enforcing a sense of
*consistency* inside it by doing the following:
* Train a standard image classification model.
* Train an _equal or larger_ model on a noisy version of the dataset (augmented using
[RandAugment](https://arxiv.org/abs/1909.13719)).
* To do this, we will first obtain predictions of the previous model on the clean images
of the dataset.
* We will then use these predictions and train the second model to match these
predictions on the noisy variant of the same images. This is identical to the workflow of
[*Knowledge Distillation*](https://keras.io/examples/vision/knowledge_distillation/) but
since the student model is equal or larger in size this process is also referred to as
***Self-Training***.
This overall training workflow finds its roots in works like
[FixMatch](https://arxiv.org/abs/2001.07685), [Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848),
and [Noisy Student Training](https://arxiv.org/abs/1911.04252). Since this training
process encourages a model to yield consistent predictions for clean as well as noisy
images, it's often referred to as *consistency training* or *training with consistency
regularization*. Although the example focuses on using consistency training to enhance
the robustness of models to common corruptions, it can also serve as a template
for performing _weakly supervised learning_.
This example requires TensorFlow 2.4 or higher, as well as TensorFlow Models and
TensorFlow Addons, which can be installed using the following command:
"""
"""shell
pip install -q tf-models-official tensorflow-addons
"""
"""
## Imports and setup
"""
from official.vision.image_classification.augment import RandAugment
from tensorflow.keras import layers
import tensorflow as tf
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
tf.random.set_seed(42)
"""
## Define hyperparameters
"""
AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 128
EPOCHS = 5
CROP_TO = 72
RESIZE_TO = 96
"""
## Load the CIFAR-10 dataset
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
val_samples = 49500
new_train_x, new_y_train = x_train[:val_samples], y_train[:val_samples]
val_x, val_y = x_train[val_samples:], y_train[val_samples:]
"""
## Create TensorFlow `Dataset` objects
"""
# Initialize `RandAugment` object with 2 layers of
# augmentation transforms and strength of 9.
augmenter = RandAugment(num_layers=2, magnitude=9)
"""
For training the teacher model, we will only be using two geometric augmentation
transforms: random horizontal flip and random crop.
"""
def preprocess_train(image, label, noisy=True):
image = tf.image.random_flip_left_right(image)
# We first resize the original image to a larger dimension
# and then we take random crops from it.
image = tf.image.resize(image, [RESIZE_TO, RESIZE_TO])
image = tf.image.random_crop(image, [CROP_TO, CROP_TO, 3])
if noisy:
image = augmenter.distort(image)
return image, label
def preprocess_test(image, label):
image = tf.image.resize(image, [CROP_TO, CROP_TO])
return image, label
train_ds = tf.data.Dataset.from_tensor_slices((new_train_x, new_y_train))
validation_ds = tf.data.Dataset.from_tensor_slices((val_x, val_y))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
"""
We make sure `train_clean_ds` and `train_noisy_ds` are shuffled using the *same* seed to
ensure their orders are exactly the same. This will be helpful during training the
student model.
"""
# This dataset will be used to train the first model.
train_clean_ds = (
train_ds.shuffle(BATCH_SIZE * 10, seed=42)
.map(lambda x, y: (preprocess_train(x, y, noisy=False)), num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# This prepares the `Dataset` object to use RandAugment.
train_noisy_ds = (
train_ds.shuffle(BATCH_SIZE * 10, seed=42)
.map(preprocess_train, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
validation_ds = (
validation_ds.map(preprocess_test, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
test_ds = (
test_ds.map(preprocess_test, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# This dataset will be used to train the second model.
consistency_training_ds = tf.data.Dataset.zip((train_clean_ds, train_noisy_ds))
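"""
As an optional sanity check (not part of the original workflow), we can verify that the
two pipelines are aligned: because both use the same shuffle seed, the labels coming out
of the zipped dataset should match batch for batch.
"""

for (_, clean_labels), (_, noisy_labels) in consistency_training_ds.take(1):
    tf.debugging.assert_equal(clean_labels, noisy_labels)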
"""
## Visualize the datasets
"""
sample_images, sample_labels = next(iter(train_clean_ds))
plt.figure(figsize=(10, 10))
for i, image in enumerate(sample_images[:9]):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("int"))
plt.axis("off")
sample_images, sample_labels = next(iter(train_noisy_ds))
plt.figure(figsize=(10, 10))
for i, image in enumerate(sample_images[:9]):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("int"))
plt.axis("off")
"""
## Define a model building utility function
We now define our model building utility. Our model is based on the [ResNet50V2 architecture](https://arxiv.org/abs/1603.05027).
"""
def get_training_model(num_classes=10):
resnet50_v2 = tf.keras.applications.ResNet50V2(
weights=None,
include_top=False,
input_shape=(CROP_TO, CROP_TO, 3),
)
model = tf.keras.Sequential(
[
layers.Input((CROP_TO, CROP_TO, 3)),
layers.Rescaling(scale=1.0 / 127.5, offset=-1),
resnet50_v2,
layers.GlobalAveragePooling2D(),
layers.Dense(num_classes),
]
)
return model
"""
In the interest of reproducibility, we serialize the initial random weights of the
teacher network.
"""
initial_teacher_model = get_training_model()
initial_teacher_model.save_weights("initial_teacher_model.h5")
"""
## Train the teacher model
As noted in Noisy Student Training, training the teacher model with *geometric
ensembling* and forcing the student model to mimic it leads to better
performance. The original work uses [Stochastic Depth](https://arxiv.org/abs/1603.09382)
and [Dropout](https://jmlr.org/papers/v15/srivastava14a.html) to bring in the ensembling
part but for this example, we will use [Stochastic Weight Averaging](https://arxiv.org/abs/1803.05407)
(SWA) which also resembles geometric ensembling.
"""
# Define the callbacks.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(patience=3)
early_stopping = tf.keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True
)
# Initialize SWA from TensorFlow Addons.
SWA = tfa.optimizers.SWA
# Compile and train the teacher model.
teacher_model = get_training_model()
teacher_model.load_weights("initial_teacher_model.h5")
teacher_model.compile(
# Notice that we are wrapping our optimizer within SWA
optimizer=SWA(tf.keras.optimizers.Adam()),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
history = teacher_model.fit(
train_clean_ds,
epochs=EPOCHS,
validation_data=validation_ds,
callbacks=[reduce_lr, early_stopping],
)
# Evaluate the teacher model on the test set.
_, acc = teacher_model.evaluate(test_ds, verbose=0)
print(f"Test accuracy: {acc*100}%")
"""
## Define a self-training utility
For this part, we will borrow the `Distiller` class from [this Keras Example](https://keras.io/examples/vision/knowledge_distillation/).
"""
# Majority of the code is taken from:
# https://keras.io/examples/vision/knowledge_distillation/
class SelfTrainer(tf.keras.Model):
def __init__(self, student, teacher):
super(SelfTrainer, self).__init__()
self.student = student
self.teacher = teacher
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
temperature=3,
):
super(SelfTrainer, self).compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.temperature = temperature
def train_step(self, data):
# Since our dataset is a zip of two independent datasets,
# after initially parsing them, we segregate the
# respective images and labels next.
clean_ds, noisy_ds = data
clean_images, _ = clean_ds
noisy_images, y = noisy_ds
# Forward pass of teacher
teacher_predictions = self.teacher(clean_images, training=False)
with tf.GradientTape() as tape:
# Forward pass of student
student_predictions = self.student(noisy_images, training=True)
# Compute losses
student_loss = self.student_loss_fn(y, student_predictions)
distillation_loss = self.distillation_loss_fn(
tf.nn.softmax(teacher_predictions / self.temperature, axis=1),
tf.nn.softmax(student_predictions / self.temperature, axis=1),
)
total_loss = (student_loss + distillation_loss) / 2
# Compute gradients
trainable_vars = self.student.trainable_variables
gradients = tape.gradient(total_loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics configured in `compile()`
self.compiled_metrics.update_state(
y, tf.nn.softmax(student_predictions, axis=1)
)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
results.update({"total_loss": total_loss})
return results
def test_step(self, data):
        # During inference, we only pass a dataset consisting of images and labels.
x, y = data
# Compute predictions
y_prediction = self.student(x, training=False)
# Update the metrics
self.compiled_metrics.update_state(y, tf.nn.softmax(y_prediction, axis=1))
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
return results
"""
The only difference in this implementation is the way the loss is calculated. **Instead
of weighting the distillation loss and the student loss differently, we take their
average, following Noisy Student Training**.
"""
"""
## Train the student model
"""
# Define the callbacks.
# We are using a larger decay factor to stabilize the training.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
patience=3, factor=0.5, monitor="val_accuracy"
)
early_stopping = tf.keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True, monitor="val_accuracy"
)
# Compile and train the student model.
self_trainer = SelfTrainer(student=get_training_model(), teacher=teacher_model)
self_trainer.compile(
# Notice we are *not* using SWA here.
optimizer="adam",
metrics=["accuracy"],
student_loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
distillation_loss_fn=tf.keras.losses.KLDivergence(),
temperature=10,
)
history = self_trainer.fit(
consistency_training_ds,
epochs=EPOCHS,
validation_data=validation_ds,
callbacks=[reduce_lr, early_stopping],
)
# Evaluate the student model.
acc = self_trainer.evaluate(test_ds, verbose=0)
print(f"Test accuracy from student model: {acc*100}%")
"""
## Assess the robustness of the models
A standard benchmark of assessing the robustness of vision models is to record their
performance on corrupted datasets like ImageNet-C and CIFAR-10-C both of which were
proposed in [Benchmarking Neural Network Robustness to Common Corruptions and
Perturbations](https://arxiv.org/abs/1903.12261). For this example, we will be using the
CIFAR-10-C dataset which has 19 different corruptions on 5 different severity levels. To
assess the robustness of the models on this dataset, we will do the following:
* Run the pre-trained models on the highest level of severities and obtain the top-1
accuracies.
* Compute the mean top-1 accuracy.
For the purpose of this example, we won't be running these steps here (a brief sketch
of how they could be implemented is given at the end of this section). This is why we
trained the models for only 5 epochs. You can check out [this
repository](https://github.com/sayakpaul/Consistency-Training-with-Supervision) that
demonstrates the full-scale training experiments and also the aforementioned assessment.
The figure below presents an executive summary of that assessment:
![](https://i.ibb.co/HBJkM9R/image.png)
**Mean Top-1** results stand for the CIFAR-10-C dataset and **Test Top-1** results stand
for the CIFAR-10 test set. It's clear that consistency training has an advantage on not
only enhancing the model robustness but also on improving the standard test performance.
"""
| apache-2.0 |
herilalaina/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 57 | 2848 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009 [1]_.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
herilalaina/scikit-learn | doc/conf.py | 4 | 9872 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
}
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {
'sklearn': None}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |