repo_name (stringlengths 6-103) | path (stringlengths 5-191) | copies (stringlengths 1-4) | size (stringlengths 4-6) | content (stringlengths 986-970k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
Nu3001/external_chromium_org | third_party/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| bsd-3-clause |
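A minimal usage sketch for the visitor pattern in the file above (not part of the dataset row; it assumes only a standard Jinja2 installation): a `NodeVisitor` subclass whose `visit_Name` method is picked up through the `'visit_' + class name` dispatch described in the docstring, collecting every variable name referenced in a template AST.

```python
from jinja2 import Environment
from jinja2.visitor import NodeVisitor


class NameCollector(NodeVisitor):
    """Collect every jinja2.nodes.Name referenced in a template AST."""

    def __init__(self):
        self.names = set()

    def visit_Name(self, node):
        # Dispatched via 'visit_' + class name, as described in the docstring.
        self.names.add(node.name)
        self.generic_visit(node)  # keep walking any child nodes


env = Environment()
ast = env.parse('Hello {{ user.name }}, you have {{ count }} messages.')
collector = NameCollector()
collector.visit(ast)
print(collector.names)  # {'user', 'count'} (set order may vary)
```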
anapaulagomes/dashboard | lib/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| apache-2.0 |
AOSPU/external_chromium_org | third_party/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| bsd-3-clause |
SaschaMester/delicium | third_party/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| bsd-3-clause |
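The `NodeTransformer` above replaces each visited node with the visitor's return value and drops the node when `None` is returned. A minimal sketch of that behaviour, assuming only a standard Jinja2 installation, which upper-cases string constants in a parsed template:

```python
from jinja2 import Environment
from jinja2.nodes import Const
from jinja2.visitor import NodeTransformer


class UpperConsts(NodeTransformer):
    """Upper-case every string constant found in the template AST."""

    def visit_Const(self, node):
        if isinstance(node.value, str):
            node.value = node.value.upper()
        return node  # returning the node keeps it; returning None would drop it


env = Environment()
ast = env.parse('{{ "hello " ~ name }}')
UpperConsts().visit(ast)
print(next(ast.find_all(Const)).value)  # 'HELLO '
```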
krisgiesing/sky_engine | tools/generate_package_files.py | 3 | 1316 | #!/usr/bin/env python
# Copyright 2018 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script generates .packages files for frontend_server and
# flutter_kernel_transformers from the Dart SDK's .packages file located in
# third_party/dart/.packages
import os
import shutil
ALL_PACKAGES = {
'frontend_server': ['flutter_kernel_transformers'],
'flutter_kernel_transformers': [],
}
SRC_DIR = os.getcwd()
DOT_PACKAGES = '.packages'
DART_PACKAGES_FILE = os.path.join(SRC_DIR, 'third_party', 'dart', DOT_PACKAGES)
# Generate .packages file in the given package.
def GeneratePackages(package, local_deps):
with open(os.path.join('flutter', package, DOT_PACKAGES), 'w') as packages:
with open(DART_PACKAGES_FILE, 'r') as dart_packages:
for line in dart_packages:
if line.startswith('#'):
packages.write(line)
else:
[name, path] = line.split(':', 1)
packages.write('%s:../../third_party/dart/%s' % (name, path))
packages.write('%s:./lib\n' % (package))
for other_package in local_deps:
packages.write('%s:../%s/lib\n' % (other_package, other_package))
for package, local_deps in ALL_PACKAGES.iteritems():
GeneratePackages(package, local_deps)
| bsd-3-clause |
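The script above writes one `name:path` entry per line. Purely for illustration, a generated `flutter/frontend_server/.packages` might look roughly like the following; the `async` entry and the comment line are hypothetical placeholders, not taken from the dataset:

```
# Generated by pub  (comment lines are copied from the Dart SDK's .packages)
async:../../third_party/dart/third_party/pkg/async/lib/
frontend_server:./lib
flutter_kernel_transformers:../flutter_kernel_transformers/lib
```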
ageron/tensorflow | tensorflow/contrib/learn/python/learn/__init__.py | 40 | 2715 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import basic_session_run_hooks
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import learn_io as io
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import utils
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.learn.python.learn.export_strategy import ExportStrategy
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.trainable import Trainable
from tensorflow.contrib.learn.python.learn.utils import *
# pylint: enable=wildcard-import
| apache-2.0 |
pandegroup/vs-utils | vs_utils/scripts/datasets/parse_tox21_datasets.py | 3 | 2618 | #!/usr/bin/env python
"""
Get Tox21 challenge datasets.
Some SMILES strings are represented by multiple compounds, with some overlap of
assays. These compounds need to be condensed and their assay outcomes need to
be reconciled.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import argparse
import cPickle
import gzip
import numpy as np
import os
import warnings
from vs_utils.utils.target_utils import Tox21Parser
def get_args(input_args=None):
"""
Get command-line arguments.
Parameters
----------
input_args : list, optional
Input arguments. If not provided, defaults to sys.argv[1:].
"""
parser = argparse.ArgumentParser()
parser.add_argument('input',
help='Input filename.')
parser.add_argument('--merge', choices=['max', 'min', 'majority_neg',
'majority_pos'], default='max',
help='Target merge strategy.')
parser.add_argument('-d', '--dir', default='.',
help='Directory in which to write target files.')
return parser.parse_args(input_args)
def main(filename, merge_strategy, directory='.'):
"""
Get Tox21 challenge datasets.
Parameters
----------
filename : str
Data filename.
merge_strategy : str, optional (default 'max')
Strategy to use when merging targets for duplicated molecules. Choose
from 'max' (active if active in any assay), 'min' (inactive if inactive
in any assay), 'majority_pos' (majority vote with ties assigned
active), or 'majority_neg' (majority vote with ties assigned inactive).
directory : str, optional (default '.')
Directory in which to write target files.
"""
parser = Tox21Parser(filename, merge_strategy=merge_strategy)
data = parser.get_targets()
# save individual datasets
for dataset in data:
if not len(data[dataset]):
warnings.warn('Dataset "{}" is empty.'.format(dataset))
continue
targets = data[dataset]['targets']
pos = np.count_nonzero(targets == 1)
neg = np.count_nonzero(targets == 0)
assert pos + neg == targets.size
print '{}\t{}\t{}'.format(dataset, pos, neg)
filename = os.path.join(directory, '{}-classes.pkl.gz'.format(dataset))
with gzip.open(filename, 'wb') as f:
cPickle.dump(data[dataset], f, cPickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
args = get_args()
main(args.input, args.merge, args.dir)
| gpl-3.0 |
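The four merge strategies described in `main()` reduce multiple 0/1 assay outcomes for one compound to a single label. Below is a standalone sketch of that reduction, written only to make the strategy names concrete; the actual logic lives in `vs_utils.utils.target_utils.Tox21Parser`.

```python
import numpy as np


def merge_targets(outcomes, strategy='max'):
    """Reduce duplicate 0/1 assay outcomes for one compound to one label."""
    outcomes = np.asarray(outcomes)
    if strategy == 'max':           # active if active in any assay
        return outcomes.max()
    if strategy == 'min':           # inactive if inactive in any assay
        return outcomes.min()
    pos = np.count_nonzero(outcomes == 1)
    neg = np.count_nonzero(outcomes == 0)
    if strategy == 'majority_pos':  # majority vote, ties assigned active
        return int(pos >= neg)
    if strategy == 'majority_neg':  # majority vote, ties assigned inactive
        return int(pos > neg)
    raise ValueError('unknown merge strategy: %s' % strategy)


print(merge_targets([1, 0, 1], 'majority_neg'))  # 1 (clear majority)
print(merge_targets([1, 0], 'majority_neg'))     # 0 (tie goes to inactive)
```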
nick-monto/SpeechRecog_CNN | model_keras_16k.py | 1 | 5268 | import os
import fnmatch
import pandas as pd
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint
img_height, img_width = 120, 200
num_epochs = 5
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result[0]
def load_image(path):
img = Image.open(path).convert('L') # read in as grayscale
img = img.resize((img_width, img_height))
# img.load() # loads the image into memory
img_data = np.asarray(img, dtype="float")
return img_data
df_train = pd.read_table('img_set_16k_train.txt',
delim_whitespace=True,
names=['stimulus', 'language'])
stim_train = df_train['stimulus']
labels_train = pd.get_dummies(df_train['language'])
labels_train = labels_train.values
df_val = pd.read_table('img_set_16k_val.txt',
delim_whitespace=True,
names=['stimulus', 'language'])
stim_val = df_val['stimulus']
labels_val = pd.get_dummies(df_val['language'])
labels_val = labels_val.values
training_data_dir = 'Input_spectrogram_16k/Training' # directory for training data
val_data_dir = 'Input_spectrogram_16k/Validation' # directory for test data
print("Preparing the input and labels...")
specs_train_input = []
for i in range(len(stim_train)):
specs_train_input.append(load_image(find(stim_train.iloc[i],
training_data_dir)))
specs_train_input = np.asarray(specs_train_input)
specs_train_input = specs_train_input.reshape((len(stim_train),
img_height, img_width, 1))
print('There are a total of {} training stimuli!'.format(specs_train_input.shape[0]))
specs_val_input = []
for i in range(len(stim_val)):
specs_val_input.append(load_image(find(stim_val.iloc[i],
val_data_dir)))
specs_val_input = np.asarray(specs_val_input)
specs_val_input = specs_val_input.reshape((len(stim_val),
img_height, img_width, 1))
print('There are a total of {} validation stimuli!'.format(specs_val_input.shape[0]))
print("Done!")
# set of augments that will be applied to the training data
datagen = ImageDataGenerator(rescale=1./255)
checkpoint = ModelCheckpoint('./weights_16k_updated.best.hdf5',
monitor='val_acc',
verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Define the model: 4 convolutional layers, 4 max pools
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=(img_height, img_width, 1),
strides=(2, 2),
activation='relu',
kernel_initializer='TruncatedNormal',
name='conv1'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
model.add(Conv2D(64, (3, 3), padding='same',
strides=(2, 2),
activation='relu',
kernel_initializer='TruncatedNormal',
name='conv2'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool2'))
model.add(Conv2D(128, (3, 3), padding='same',
activation='relu',
kernel_initializer='TruncatedNormal',
name='conv3'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool3'))
model.add(Conv2D(256, (3, 3), padding='same',
activation='relu',
kernel_initializer='TruncatedNormal',
name='conv4'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool4'))
model.add(Flatten(name='flat1')) # converts 3D feature maps to 1D feature vectors
model.add(Dense(256, activation='relu',
kernel_initializer='TruncatedNormal', name='fc1'))
model.add(Dropout(0.5, name='do1')) # reset half of the weights to zero
model.add(Dense(8, activation='softmax', name='output'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# compute quantities required for featurewise normalization
datagen.fit(specs_train_input)
datagen.fit(specs_val_input)
print("Initializing the model...")
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(specs_train_input,
labels_train,
batch_size=16),
steps_per_epoch=len(stim_train) / 16,
epochs=num_epochs,
verbose=1,
callbacks=callbacks_list,
validation_data=datagen.flow(specs_val_input,
labels_val,
batch_size=8),
validation_steps=len(stim_val) / 8)
model.save('LangNet_4Conv_updated.h5')
| mit |
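A minimal inference sketch for the model trained above. It reuses the file names saved by the script, while `'spectrogram.png'` is a hypothetical input image; the preprocessing mirrors `load_image()` plus the 1/255 rescale applied by the data generator.

```python
import numpy as np
from PIL import Image
from keras.models import load_model

img_height, img_width = 120, 200

model = load_model('LangNet_4Conv_updated.h5')        # architecture + final weights
model.load_weights('weights_16k_updated.best.hdf5')   # or load the best checkpoint instead

# 'spectrogram.png' is a placeholder; preprocess exactly like load_image() above.
img = Image.open('spectrogram.png').convert('L').resize((img_width, img_height))
x = np.asarray(img, dtype='float').reshape(1, img_height, img_width, 1) / 255.
probs = model.predict(x)          # softmax over the 8 language classes
print(probs.argmax(axis=1))
```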
mxjl620/scikit-learn | sklearn/linear_model/bayes.py | 219 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
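Reading the convergence loop of `BayesianRidge.fit` above as equations, each iteration applies the evidence-maximization updates below, where $s_i$ are the squared singular values of $X$, $n$ is the number of samples, $w$ the current coefficients, and $r = \lVert y - Xw \rVert^2$ the residual sum of squares:

$$
\gamma = \sum_i \frac{\alpha\, s_i}{\lambda + \alpha\, s_i}, \qquad
\lambda \leftarrow \frac{\gamma + 2\lambda_1}{\lVert w \rVert^2 + 2\lambda_2}, \qquad
\alpha \leftarrow \frac{n - \gamma + 2\alpha_1}{r + 2\alpha_2}
$$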
pansapiens/mytardis | tardis/tardis_portal/south_migrations/0028_convert_replicas.py | 3 | 27093 | # -*- coding: utf-8 -*-
import datetime
import os
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
# Locations
for location in orm.Location.objects.all():
newsb = orm.StorageBox()
newsb.name = location.name
newsb.description = "converted from Location"
newsb.status = "dirty"
if location.type in ('local', 'online') and \
location.transfer_provider in ('local', ):
base_dir = location.url.replace('file://', '')
try:
base_dir_stat = os.statvfs(base_dir)
except OSError:
# for running this on a test db
print 'Cannot access location %s. OK for testing only' % (
location.name,)
base_dir_stat = os.statvfs('/')
disk_size = base_dir_stat.f_frsize * base_dir_stat.f_blocks
newsb.max_size = disk_size
newsb.save()
sb_opt = orm.StorageBoxOption(storage_box=newsb,
key="location",
value=base_dir)
sb_opt.save()
else:
# placeholder storage class. Manual intervention required
newsb.django_storage_class = \
'tardis.tardis_portal.storage.DummyStorage'
newsb.max_size = 0
newsb.save()
# Replicas
total = float(orm.Replica.objects.all().count())
counter = 0
percent = 0
print 'total replicas: %d ' % int(total)
import resource
print 'memory used: %d' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for replica in orm.Replica.objects.all().iterator():
new_dfo = orm.DataFileObject()
new_dfo.datafile = replica.datafile
new_dfo.uri = replica.url
new_dfo.storage_box = orm.StorageBox.objects.get(
name=replica.location.name)
new_dfo.save()
new_dfo.datafile.dataset.storage_boxes.add(new_dfo.storage_box)
counter += 1
if int(counter/total * 100) > percent:
percent += 1
print '{0} % done '.format(percent),
print 'memory used: %d' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if 0 == 1:
files_failed_verification = []
# verify all files
for dfo in orm.DataFileObject.objects.all():
if dfo.storage_box.django_storage_class != \
'tardis.tardis_portal.storage.DummyStorage':
if not dfo.verify():
files_failed_verification.append(dfo)
print files_failed_verification or "All files migrated fine"
def backwards(self, orm):
"Write your backwards methods here."
raise RuntimeError("Cannot reverse this migration.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'tardis_portal.author_experiment': {
'Meta': {'ordering': "['order']", 'unique_together': "(('experiment', 'author'),)", 'object_name': 'Author_Experiment'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'})
},
'tardis_portal.datafile': {
'Meta': {'ordering': "['filename']", 'unique_together': "(['directory', 'filename', 'version'],)", 'object_name': 'DataFile'},
'created_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'directory': ('tardis.tardis_portal.models.fields.DirectoryField', [], {'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'modification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sha512sum': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'tardis_portal.datafileobject': {
'Meta': {'unique_together': "(['datafile', 'storage_box'],)", 'object_name': 'DataFileObject'},
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'file_objects'", 'to': "orm['tardis_portal.DataFile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_verified_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'storage_box': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'file_objects'", 'to': "orm['tardis_portal.StorageBox']"}),
'uri': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tardis_portal.datafileparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'DatafileParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatafileParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.datafileparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'DatafileParameterSet'},
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DataFile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
'storage_box': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datafileparametersets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.StorageBox']"})
},
'tardis_portal.dataset': {
'Meta': {'ordering': "['-id']", 'object_name': 'Dataset'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'directory': ('tardis.tardis_portal.models.fields.DirectoryField', [], {'null': 'True', 'blank': 'True'}),
'experiments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'storage_boxes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'datasets'", 'blank': 'True', 'to': "orm['tardis_portal.StorageBox']"})
},
'tardis_portal.datasetparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'DatasetParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DatasetParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.datasetparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'DatasetParameterSet'},
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Dataset']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
'storage_box': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasetparametersets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.StorageBox']"})
},
'tardis_portal.experiment': {
'Meta': {'object_name': 'Experiment'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'handle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution_name': ('django.db.models.fields.CharField', [], {'default': "'Monash University'", 'max_length': '400'}),
'license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.License']", 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'public_access': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.experimentparameter': {
'Meta': {'ordering': "['name']", 'object_name': 'ExperimentParameter'},
'datetime_value': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"}),
'numerical_value': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parameterset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ExperimentParameterSet']"}),
'string_value': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'tardis_portal.experimentparameterset': {
'Meta': {'ordering': "['id']", 'object_name': 'ExperimentParameterSet'},
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
'storage_box': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'experimentparametersets'", 'symmetrical': 'False', 'to': "orm['tardis_portal.StorageBox']"})
},
'tardis_portal.freetextsearchfield': {
'Meta': {'object_name': 'FreeTextSearchField'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.ParameterName']"})
},
'tardis_portal.groupadmin': {
'Meta': {'object_name': 'GroupAdmin'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'tardis_portal.license': {
'Meta': {'object_name': 'License'},
'allows_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '2000', 'blank': 'True'}),
'internal_description': ('django.db.models.fields.TextField', [], {}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '2000'})
},
'tardis_portal.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'transfer_provider': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '400'})
},
'tardis_portal.objectacl': {
'Meta': {'ordering': "['content_type', 'object_id']", 'object_name': 'ObjectACL'},
'aclOwnershipType': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canRead': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canWrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'effectiveDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'entityId': ('django.db.models.fields.CharField', [], {'max_length': '320'}),
'expiryDate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isOwner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pluginId': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'tardis_portal.parametername': {
'Meta': {'ordering': "('order', 'name')", 'unique_together': "(('schema', 'name'),)", 'object_name': 'ParameterName'},
'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'comparison_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'data_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '9999', 'null': 'True', 'blank': 'True'}),
'schema': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Schema']"}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
},
'tardis_portal.providerparameter': {
'Meta': {'unique_together': "(('location', 'name'),)", 'object_name': 'ProviderParameter'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
},
'tardis_portal.replica': {
'Meta': {'unique_together': "(('datafile', 'location'),)", 'object_name': 'Replica'},
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.DataFile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Location']"}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'stay_remote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'tardis_portal.schema': {
'Meta': {'object_name': 'Schema'},
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immutable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'namespace': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'subtype': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'tardis_portal.storagebox': {
'Meta': {'object_name': 'StorageBox'},
'description': ('django.db.models.fields.TextField', [], {'default': "'Default Storage'"}),
'django_storage_class': ('django.db.models.fields.TextField', [], {'default': "'tardis.tardis_portal.storage.MyTardisLocalFileSystemStorage'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_size': ('django.db.models.fields.BigIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.TextField', [], {'default': "'default'", 'unique': 'True'})
},
'tardis_portal.storageboxattribute': {
'Meta': {'object_name': 'StorageBoxAttribute'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'storage_box': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['tardis_portal.StorageBox']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'tardis_portal.storageboxoption': {
'Meta': {'object_name': 'StorageBoxOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'storage_box': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['tardis_portal.StorageBox']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'tardis_portal.token': {
'Meta': {'object_name': 'Token'},
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.Experiment']"}),
'expiry_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 4, 23, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'tardis_portal.userauthentication': {
'Meta': {'object_name': 'UserAuthentication'},
'authenticationMethod': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'userProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tardis_portal.UserProfile']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'tardis_portal.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isDjangoAccount': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['tardis_portal']
symmetrical = True
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/svm/tests/test_bounds.py | 277 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
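def _example_l1_min_c_usage():
    # Illustrative sketch, not part of the original test module: l1_min_c
    # returns the smallest C below which an L1-penalised linear model is
    # guaranteed to have all-zero coefficients on this data, so a C just
    # above that value can already produce a non-trivial model.
    c_min = l1_min_c(dense_X, Y1, loss='squared_hinge')
    clf = LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                    C=c_min * 1.01).fit(dense_X, Y1)
    return c_min, clf.coef_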
| bsd-3-clause |
yonglehou/scikit-learn | doc/sphinxext/gen_rst.py | 141 | 40026 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
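def _example_tee_usage():
    # Illustrative sketch, not part of the original module: mirror writes to
    # both the real stdout and an in-memory buffer, the same way the example
    # runner below captures script output while still echoing it.
    buf = StringIO()
    tee = Tee(sys.stdout, buf)
    tee.write('hello\n')  # appears on the console and in `buf`
    return buf.getvalue()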
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
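def _example_parse_searchindex():
    # Illustrative sketch, not part of the original module: the string below
    # is a tiny hand-written stand-in for the contents of a Sphinx
    # searchindex.js file, not real Sphinx output.
    searchindex = ('Search.setIndex({objects:{"mod":{cls:[0,0,1,""]}},'
                   'filenames:["generated/mod.cls"]})')
    filenames, objects = parse_sphinx_searchindex(searchindex)
    # `filenames` lists the documentation pages; `objects` is (roughly) a
    # nested dict keyed by module and object name pointing into that list.
    return filenames, objects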
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
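def _example_resolve_doc_link():
    # Illustrative sketch, not part of the original module: the base URL and
    # the code-object dict are placeholders. Building the resolver downloads
    # and parses searchindex.js; resolve() then returns the HTML anchor for
    # the object, or None when it cannot be found.
    resolver = SphinxDocLinkResolver('http://scikit-learn.org/stable')
    cobj = {'name': 'KMeans', 'module': 'sklearn.cluster',
            'module_short': 'sklearn.cluster'}
    return resolver.resolve(cobj, this_url='auto_examples/plot_foo.html')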
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
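def _example_make_thumbnail():
    # Illustrative sketch, not part of the original module: the file names
    # are placeholders. The source image is scaled to fit and centred on a
    # white 400x280 canvas, the gallery thumbnail size used further below.
    make_thumbnail('images/plot_example_001.png',
                   'images/thumb/plot_example_001.png', 400, 280)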
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
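def _example_short_module_name():
    # Illustrative sketch, not part of the original module: 'KMeans' can be
    # imported from 'sklearn.cluster', so the private implementation module
    # suffix is dropped and 'sklearn.cluster' is returned.
    return get_short_module_name('sklearn.cluster.k_means_', 'KMeans')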
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
        # generate the plot as a png image if the file name
        # starts with plot and the source file is more recent than any
        # existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/neighbors/unsupervised.py | 105 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
pansapiens/mytardis | tardis/tardis_portal/download.py | 1 | 22005 | # -*- coding: utf-8 -*-
"""
download.py
.. moduleauthor:: Steve Androulakis <steve.androulakis@monash.edu>
.. moduleauthor:: Ulrich Felzmann <ulrich.felzmann@versi.edu.au>
.. moduleauthor:: Grischa Meyer <grischa.meyer@monash.edu>
"""
import logging
import urllib
import os
import cStringIO as StringIO
import time
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
import binascii
crc32 = binascii.crc32
from itertools import chain
import tarfile
from tarfile import TarFile
import gzip
import io
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponseRedirect, StreamingHttpResponse
from django.conf import settings
from django.utils.dateformat import format as dateformatter
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.decorators import login_required
from tardis.tardis_portal.models import Dataset
from tardis.tardis_portal.models import DataFile
from tardis.tardis_portal.models import Experiment
from tardis.tardis_portal.auth.decorators import has_datafile_download_access
from tardis.tardis_portal.auth.decorators import experiment_download_required
from tardis.tardis_portal.auth.decorators import dataset_download_required
from tardis.tardis_portal.shortcuts import render_error_message
from tardis.tardis_portal.views import return_response_not_found, \
return_response_error
logger = logging.getLogger(__name__)
DEFAULT_ORGANIZATION = settings.DEFAULT_ARCHIVE_ORGANIZATION
def _create_download_response(request, datafile_id, disposition='attachment'): # too complex # noqa
# Get datafile (and return 404 if absent)
try:
datafile = DataFile.objects.get(pk=datafile_id)
except DataFile.DoesNotExist:
return return_response_not_found(request)
    # Check user has access to datafile
if not has_datafile_download_access(request=request,
datafile_id=datafile.id):
return return_response_error(request)
# Send an image that can be seen in the browser
if disposition == 'inline' and datafile.is_image():
from tardis.tardis_portal.iiif import download_image
args = (request, datafile.id, 'full', 'full', '0', 'native')
# Send unconverted image if web-compatible
if datafile.get_mimetype() in ('image/gif', 'image/jpeg', 'image/png'):
return download_image(*args)
# Send converted image
return download_image(*args, format='png')
# Send local file
try:
verified_only = True
# Query parameter to allow download of unverified files
ignore_verif = request.GET.get('ignore_verification_status', '0')
# Ensure ignore_verification_status=0 etc works as expected
# a bare ?ignore_verification_status is True
if ignore_verif.lower() in [u'', u'1', u'true']:
verified_only = False
# Get file object for datafile
file_obj = datafile.get_file(verified_only=verified_only)
if not file_obj:
# If file path doesn't resolve, return not found
if verified_only:
return render_error_message(request,
"File is unverified, "
"please try again later.",
status=503)
else:
return return_response_not_found(request)
wrapper = FileWrapper(file_obj, blksize=65535)
response = StreamingHttpResponse(wrapper,
content_type=datafile.get_mimetype())
response['Content-Disposition'] = \
'%s; filename="%s"' % (disposition, datafile.filename)
return response
except IOError:
# If we can't read the file, return not found
return return_response_not_found(request)
except ValueError: # raised when replica not verified TODO: custom excptn
redirect = request.META.get('HTTP_REFERER',
'http://%s/' %
request.META.get('HTTP_HOST'))
message = """The file you are trying to access has not yet been
verified. Verification is an automated background process.
Please try again later or contact the system
administrator if the issue persists."""
message = ' '.join(message.split()) # removes spaces
redirect = redirect + '#error:' + message
return HttpResponseRedirect(redirect)
def view_datafile(request, datafile_id):
return _create_download_response(request, datafile_id, 'inline')
def download_datafile(request, datafile_id):
return _create_download_response(request, datafile_id)
__mapper_makers = None
def get_download_organizations():
return _get_mapper_makers().keys()
def _get_mapper_makers():
global __mapper_makers
if not __mapper_makers:
__mapper_makers = {}
mappers = getattr(settings, 'ARCHIVE_FILE_MAPPERS', [])
for (organization, mapper_desc) in mappers.items():
mapper_fn = _safe_import(mapper_desc[0])
if len(mapper_desc) >= 2:
kwarg = mapper_desc[1]
else:
kwarg = {}
def mapper_maker_maker(kwarg):
def mapper_maker(rootdir):
myKwarg = dict(kwarg)
myKwarg['rootdir'] = rootdir
def mapper(datafile):
return mapper_fn(datafile, **myKwarg)
return mapper
return mapper_maker
__mapper_makers[organization] = mapper_maker_maker(kwarg)
return __mapper_makers
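def _example_archive_file_mappers():
    # Illustrative sketch, not part of the original module: the shape of the
    # settings.ARCHIVE_FILE_MAPPERS entries consumed above. Each value is a
    # (dotted path to a mapper function, optional kwargs dict) tuple; the
    # dotted path below is a placeholder, not a real module.
    return {
        'deep-storage': ('tardis.apps.example.mappers.deep_storage_mapper',
                         {'exclude_metadata': True}),
    }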
def _safe_import(path):
try:
dot = path.rindex('.')
except ValueError:
raise ImproperlyConfigured('%s isn\'t an archive mapper' % path)
mapper_module, mapper_fname = path[:dot], path[dot + 1:]
try:
mod = import_module(mapper_module)
except ImportError, e:
raise ImproperlyConfigured('Error importing mapper %s: "%s"' %
(mapper_module, e))
try:
return getattr(mod, mapper_fname)
except AttributeError:
raise ImproperlyConfigured(
'Mapper module "%s" does not define a "%s" function' %
(mapper_module, mapper_fname))
def _make_mapper(organization, rootdir):
if organization == 'classic':
return classic_mapper(rootdir)
else:
mapper_makers = _get_mapper_makers()
mapper_maker = mapper_makers.get(organization)
if mapper_maker:
return mapper_maker(rootdir)
else:
return None
def classic_mapper(rootdir):
def _get_filename(df):
return os.path.join(rootdir, str(df.dataset.id), df.filename)
return _get_filename
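def _example_classic_mapping():
    # Illustrative sketch, not part of the original module: with the
    # 'classic' organization a datafile named 'data.nxs' in dataset 42 is
    # archived under 'my-experiment/42/data.nxs'.
    mapper = classic_mapper('my-experiment')
    return mapper  # call mapper(datafile) for each DataFile to be archived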
def _get_datafile_details_for_archive(mapper, datafiles):
# It would be simplest to do this lazily. But if we do that, we implicitly
    # pass the database context to the thread that will write the archive,
# and that is a bit dodgy. (It breaks in unit tests!) Instead, we
# populate the list eagerly, but with a file getter rather than the file
# itself. If we populate with actual File objects, we risk running out
# of file descriptors.
res = []
for df in datafiles:
mapped_pathname = mapper(df)
if mapped_pathname:
res.append((df, mapper(df)))
return res
########### NEW DOWNLOAD ##############
class UncachedTarStream(TarFile):
'''
Stream files into a compressed tar stream on the fly
'''
def __init__(self, mapped_file_objs, filename, do_gzip=False,
buffersize=2*65536, comp_level=6, http_buffersize=65535):
self.errors = 'strict'
self.pax_headers = {}
self.mode = 'w'
self.closed = False
self.members = []
self._loaded = False
self.offset = 0
self.inodes = {}
self._loaded = True
self.mapped_file_objs = mapped_file_objs
filenum = len(mapped_file_objs)
self.tarinfos = [None] * filenum
self.tarinfo_bufs = [None] * filenum
self.filename = filename
self.buffersize = buffersize
self.http_buffersize = http_buffersize
self.do_gzip = do_gzip
if do_gzip:
self.binary_buffer = io.BytesIO()
self.gzipfile = gzip.GzipFile(bytes(filename), 'w',
comp_level, self.binary_buffer)
else:
self.tar_size = self.compute_size()
def compute_size(self):
total_size = 0
for num, fobj in enumerate(self.mapped_file_objs):
df, name = fobj
tarinfo = self.tarinfo_for_df(df, name)
self.tarinfos[num] = tarinfo
tarinfo_buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.tarinfo_bufs[num] = tarinfo_buf
total_size += len(tarinfo_buf)
size = int(tarinfo.size)
blocks, remainder = divmod(size, tarfile.BLOCKSIZE)
if remainder > 0:
blocks += 1
total_size += blocks * tarfile.BLOCKSIZE
blocks, remainder = divmod(total_size, tarfile.RECORDSIZE)
if remainder > 0:
blocks += 1
total_size = blocks * tarfile.RECORDSIZE
return total_size
def tarinfo_for_df(self, df, name):
tarinfo = self.tarinfo(name)
tarinfo.size = int(df.get_size())
mtime = None
dj_mtime = df.modification_time
if dj_mtime is not None:
mtime = dateformatter(dj_mtime, 'U')
else:
try:
fileobj = df.file_object
mtime = os.fstat(fileobj.fileno()).st_mtime
except:
                raise Exception('cannot read mtime for downloads')
if mtime is None:
mtime = time.time()
tarinfo.mtime = mtime
return tarinfo
def compress(self, buf):
self.gzipfile.write(buf)
self.gzipfile.flush()
self.binary_buffer.seek(0)
result = self.binary_buffer.read()
self.binary_buffer.seek(0)
self.binary_buffer.truncate()
return result
def prepare_output(self, uc_buf, remainder):
if self.do_gzip:
result_buf = self.compress(uc_buf)
else:
result_buf = uc_buf
if remainder is not None:
result_buf = ''.join([remainder, result_buf])
stream_buffers = []
while len(result_buf) >= self.http_buffersize:
stream_buffers.append(result_buf[:self.http_buffersize])
result_buf = result_buf[self.http_buffersize:]
return stream_buffers, result_buf
def close_gzip(self):
self.gzipfile.close()
self.binary_buffer.seek(0)
result = self.binary_buffer.read()
self.binary_buffer.seek(0)
self.binary_buffer.truncate()
return result
def make_tar(self): # noqa
'''
        Main tar generator. Until Python 3 (``yield from``) this has to stay
        in a single function, because ``yield``s don't bubble up from nested
        generators.
'''
remainder_buf = None
for num, fobj in enumerate(self.mapped_file_objs):
df, name = fobj
fileobj = df.file_object
self._check('aw')
tarinfo = self.tarinfos[num]
buf = self.tarinfo_bufs[num]
stream_buffers, remainder_buf = self.prepare_output(
buf,
remainder_buf)
for stream_buf in stream_buffers:
yield stream_buf
self.offset += len(buf or '')
if tarinfo.isreg():
if tarinfo.size == 0:
continue
# split into file read buffer sized chunks
blocks, remainder = divmod(tarinfo.size, self.buffersize)
for b in xrange(blocks):
buf = fileobj.read(self.buffersize)
if len(buf) < self.buffersize:
raise IOError("end of file reached")
# send in http_buffersize sized chunks
stream_buffers, remainder_buf = self.prepare_output(
buf, remainder_buf)
for stream_buf in stream_buffers:
yield stream_buf
# in case the file has remaining read bytes
if remainder != 0:
buf = fileobj.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
# send remaining file data
stream_buffers, remainder_buf = self.prepare_output(
buf, remainder_buf)
for stream_buf in stream_buffers:
yield stream_buf
blocks, remainder = divmod(tarinfo.size, tarfile.BLOCKSIZE)
if remainder > 0:
buf = (tarfile.NUL * (tarfile.BLOCKSIZE - remainder))
stream_buffers, remainder_buf = self.prepare_output(
buf, remainder_buf)
for stream_buf in stream_buffers:
yield stream_buf
blocks += 1
self.offset += blocks * tarfile.BLOCKSIZE
fileobj.close()
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, tarfile.RECORDSIZE)
if remainder > 0:
buf = tarfile.NUL * (tarfile.RECORDSIZE - remainder)
stream_buffers, remainder_buf = self.prepare_output(
buf, remainder_buf)
for stream_buf in stream_buffers:
yield stream_buf
if remainder_buf and len(remainder_buf) > 0:
yield remainder_buf
if self.do_gzip:
yield self.close_gzip()
def get_response(self):
if self.do_gzip:
content_type = 'application/x-gzip'
content_length = None
self.filename += '.gz'
else:
content_type = 'application/x-tar'
content_length = self.tar_size
response = StreamingHttpResponse(self.make_tar(),
content_type=content_type)
response['Content-Disposition'] = 'attachment; filename="%s"' % \
self.filename
response['X-Accel-Buffering'] = 'no'
if content_length is not None:
response['Content-Length'] = content_length
return response
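def _example_uncached_tar_stream(dataset_id):
    # Illustrative sketch, not part of the original module: stream a dataset
    # as an uncompressed tar archive. The archive layout here is a
    # placeholder; the wrapper views below do the same with access checks
    # and the configured file mappers applied.
    datafiles = DataFile.objects.filter(dataset=dataset_id)
    mapped = [(df, 'my-dataset/%s' % df.filename) for df in datafiles]
    tfs = UncachedTarStream(mapped, filename='my-dataset.tar', do_gzip=False)
    return tfs.get_response()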
def _streaming_downloader(request, datafiles, rootdir, filename,
comptype='tgz', organization=DEFAULT_ORGANIZATION):
'''
private function to be called by wrappers
creates download response with given files and names
'''
mapper = _make_mapper(organization, rootdir)
if not mapper:
return render_error_message(
request, 'Unknown download organization: %s' % organization,
status=400)
try:
files = _get_datafile_details_for_archive(mapper, datafiles)
tfs = UncachedTarStream(
files,
filename=filename,
do_gzip=comptype != 'tar')
return tfs.get_response()
except ValueError: # raised when replica not verified TODO: custom excptn
redirect = request.META.get('HTTP_REFERER',
'http://%s/' %
request.META.get('HTTP_HOST'))
message = """The experiment you are trying to access has not yet been
verified completely.
Verification is an automated background process.
Please try again later or contact the system
administrator if the issue persists."""
message = ' '.join(message.split()) # removes spaces
redirect = redirect + '#error:' + message
return HttpResponseRedirect(redirect)
@experiment_download_required
def streaming_download_experiment(request, experiment_id, comptype='tgz',
organization=DEFAULT_ORGANIZATION):
experiment = Experiment.objects.get(id=experiment_id)
rootdir = experiment.title.replace(' ', '_')
filename = '%s-complete.tar' % rootdir
datafiles = DataFile.objects.filter(
dataset__experiments__id=experiment_id)
return _streaming_downloader(request, datafiles, rootdir, filename,
comptype, organization)
@dataset_download_required
def streaming_download_dataset(request, dataset_id, comptype='tgz',
organization=DEFAULT_ORGANIZATION):
dataset = Dataset.objects.get(id=dataset_id)
rootdir = dataset.description.replace(' ', '_')
filename = '%s-complete.tar' % rootdir
datafiles = DataFile.objects.filter(dataset=dataset)
return _streaming_downloader(request, datafiles, rootdir, filename,
comptype, organization)
def streaming_download_datafiles(request): # too complex # noqa
"""
takes string parameter "comptype" for compression method.
Currently implemented: "tgz" and "tar"
The datafiles to be downloaded are selected using "datafile", "dataset"
or "url" parameters. An "expid" parameter may be supplied for use in
the download archive name. If "url" is used, the "expid" parameter
is also used to limit the datafiles to be downloaded to a given experiment.
"""
# Create the HttpResponse object with the appropriate headers.
# TODO: handle no datafile, invalid filename, all http links
# TODO: intelligent selection of temp file versus in-memory buffering.
    logger.debug('In download_datafiles')
comptype = getattr(settings, 'DEFAULT_ARCHIVE_FORMATS', ['tar'])[0]
organization = getattr(settings, 'DEFAULT_ARCHIVE_ORGANIZATION', 'classic')
if 'comptype' in request.POST:
comptype = request.POST['comptype']
if 'organization' in request.POST:
organization = request.POST['organization']
if 'datafile' in request.POST or 'dataset' in request.POST:
        if (len(request.POST.getlist('datafile')) > 0
                or len(request.POST.getlist('dataset')) > 0):
datasets = request.POST.getlist('dataset')
datafiles = request.POST.getlist('datafile')
# Generator to produce datafiles from dataset id
def get_dataset_datafiles(dsid):
for datafile in DataFile.objects.filter(dataset=dsid):
if has_datafile_download_access(
request=request, datafile_id=datafile.id):
yield datafile
# Generator to produce datafile from datafile id
def get_datafile(dfid):
datafile = DataFile.objects.get(pk=dfid)
if has_datafile_download_access(request=request,
datafile_id=datafile.id):
yield datafile
# Take chained generators and turn them into a set of datafiles
df_set = set(chain(chain.from_iterable(map(get_dataset_datafiles,
datasets)),
chain.from_iterable(map(get_datafile,
datafiles))))
else:
return render_error_message(
request,
                'No Datasets or Datafiles were selected for download',
status=404)
elif 'url' in request.POST:
        if len(request.POST.getlist('url')) == 0:
return render_error_message(
request,
                'No Datasets or Datafiles were selected for download',
status=404)
for url in request.POST.getlist('url'):
url = urllib.unquote(url)
raw_path = url.partition('//')[2]
experiment_id = request.POST['expid']
datafile = DataFile.objects.filter(
url__endswith=raw_path,
dataset__experiment__id=experiment_id)[0]
if has_datafile_download_access(request=request,
datafile_id=datafile.id):
df_set = set([datafile])
else:
return render_error_message(
            request, 'No Datasets or Datafiles were selected for download',
status=404)
logger.info('Files for archive command: %s' % df_set)
if len(df_set) == 0:
return render_error_message(
request,
'You do not have download access for any of the '
'selected Datasets or Datafiles ',
status=403)
try:
expid = request.POST['expid']
experiment = Experiment.objects.get(id=expid)
except (KeyError, Experiment.DoesNotExist):
experiment = iter(df_set).next().dataset.get_first_experiment()
filename = '%s-selection.tar' % experiment.title.replace(' ', '_')
rootdir = '%s-selection' % experiment.title.replace(' ', '_')
return _streaming_downloader(request, df_set, rootdir, filename,
comptype, organization)
@login_required
def download_api_key(request):
user = request.user
api_key_file = StringIO.StringIO()
api_key_file.write("ApiKey {0}:{1}".format(user, user.api_key.key))
api_key_file.seek(0)
response = StreamingHttpResponse(FileWrapper(api_key_file),
content_type='text/plain')
response['Content-Disposition'] = \
'attachment; filename="{0}.key"' .format(user)
return response
| bsd-3-clause |
dimkal/mne-python | examples/time_frequency/plot_source_label_time_frequency.py | 19 | 3767 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and phase lock
power, phase_lock = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
phase_lock = np.mean(phase_lock, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
ageron/tensorflow | tensorflow/tools/compatibility/tf_upgrade_v2.py | 1 | 78824 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import functools
import sys
import pasta
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2
from tensorflow.tools.compatibility import reorders_v2
# These pylint warnings are a mistake.
# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison
class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
# If the new argument is None, it will be removed.
# Only keyword args are handled, so make sure to also put any function in
# function_reorders to ensure that all args are made into keywords first.
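    # As an illustration of how these entries are applied (the rewrite itself
    # is driven entirely by the table below): the "tf.argmax" entry maps
    # `dimension` to `axis`, and a None target drops the keyword, e.g.
    #   tf.argmax(t, dimension=1)  ->  tf.argmax(t, axis=1)
    #   tf.nn.embedding_lookup(params, ids, validate_indices=False)
    #       ->  tf.nn.embedding_lookup(params, ids)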
self.function_keyword_renames = {
"tf.test.assert_equal_graph_def": {
"checkpoint_v2": None,
},
"tf.nn.embedding_lookup": {
"validate_indices": None,
},
"tf.image.sample_distorted_bounding_box": {
"seed2": None,
},
"tf.gradients": {
"colocate_gradients_with_ops": None,
},
"tf.hessians": {
"colocate_gradients_with_ops": None,
},
"*.minimize": {
"colocate_gradients_with_ops": None,
},
"*.compute_gradients": {
"colocate_gradients_with_ops": None,
},
"tf.cond": {
"strict": None,
"fn1": "true_fn",
"fn2": "false_fn"
},
"tf.argmin": {
"dimension": "axis",
},
"tf.argmax": {
"dimension": "axis",
},
"tf.arg_min": {
"dimension": "axis",
},
"tf.arg_max": {
"dimension": "axis",
},
"tf.math.argmin": {
"dimension": "axis",
},
"tf.math.argmax": {
"dimension": "axis",
},
"tf.image.crop_and_resize": {
"box_ind": "box_indices",
},
"tf.image.extract_image_patches": {
"ksizes": "sizes",
},
"tf.image.resize": {
"align_corners": None,
},
"tf.image.resize_images": {
"align_corners": None,
},
"tf.extract_image_patches": {
"ksizes": "sizes",
},
"tf.expand_dims": {
"dim": "axis",
},
"tf.batch_to_space": {
"block_size": "block_shape",
},
"tf.space_to_batch": {
"block_size": "block_shape",
},
"tf.nn.space_to_batch": {
"block_size": "block_shape",
},
"tf.constant": {
"verify_shape": "verify_shape_is_now_always_true",
},
"tf.convert_to_tensor": {
"preferred_dtype": "dtype_hint"
},
"tf.nn.softmax_cross_entropy_with_logits": {
"dim": "axis",
"_sentinel": None,
},
"tf.nn.softmax_cross_entropy_with_logits_v2": {
"dim": "axis"
},
"tf.linalg.l2_normalize": {
"dim": "axis",
},
"tf.linalg.norm": {
"keep_dims": "keepdims",
},
"tf.norm": {
"keep_dims": "keepdims",
},
"tf.load_file_system_library": {
"library_filename": "library_location",
},
"tf.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.math.count_nonzero": {
"input_tensor": "input",
"keep_dims": "keepdims",
"reduction_indices": "axis",
},
"tf.nn.erosion2d": {
"kernel": "filters",
"rates": "dilations",
},
"tf.math.l2_normalize": {
"dim": "axis",
},
"tf.math.log_softmax": {
"dim": "axis",
},
"tf.math.softmax": {
"dim": "axis"
},
"tf.nn.l2_normalize": {
"dim": "axis",
},
"tf.nn.log_softmax": {
"dim": "axis",
},
"tf.nn.moments": {
"keep_dims": "keepdims",
},
"tf.nn.pool": {
"dilation_rate": "dilations"
},
"tf.nn.separable_conv2d": {
"rate": "dilations"
},
"tf.nn.depthwise_conv2d": {
"rate": "dilations"
},
"tf.nn.softmax": {
"dim": "axis"
},
"tf.nn.sufficient_statistics": {
"keep_dims": "keepdims"
},
"tf.debugging.assert_all_finite": {
"t": "x",
"msg": "message",
},
"tf.sparse.add": {
"thresh": "threshold",
},
"tf.sparse_add": {
"thresh": "threshold",
},
"tf.sparse.concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse_concat": {
"concat_dim": "axis",
"expand_nonconcat_dim": "expand_nonconcat_dims",
},
"tf.sparse.split": {
"split_dim": "axis",
},
"tf.sparse_split": {
"split_dim": "axis",
},
"tf.sparse.reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_max": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse.reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis",
"keep_dims": "keepdims",
},
"tf.nn.max_pool_with_argmax": {
"Targmax": "output_dtype",
},
"tf.nn.max_pool": {
"value": "input"
},
"tf.nn.avg_pool": {
"value": "input"
},
"tf.nn.avg_pool2d": {
"value": "input"
},
"tf.multinomial": {
"output_dtype": "dtype",
},
"tf.random.multinomial": {
"output_dtype": "dtype",
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis",
},
"tf.nn.batch_norm_with_global_normalization": {
"t": "input",
"m": "mean",
"v": "variance",
},
"tf.nn.dilation2d": {
"filter": "filters",
"rates": "dilations",
},
"tf.nn.conv3d": {
"filter": "filters"
},
"tf.zeros_like": {
"tensor": "input",
},
"tf.ones_like": {
"tensor": "input",
},
"tf.nn.conv2d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.conv3d_transpose": {
"value": "input",
"filter": "filters",
},
"tf.nn.convolution": {
"filter": "filters",
"dilation_rate": "dilations",
},
"tf.gfile.Exists": {
"filename": "path",
},
"tf.gfile.Remove": {
"filename": "path",
},
"tf.gfile.Stat": {
"filename": "path",
},
"tf.gfile.Glob": {
"filename": "pattern",
},
"tf.gfile.MkDir": {
"dirname": "path",
},
"tf.gfile.MakeDirs": {
"dirname": "path",
},
"tf.gfile.DeleteRecursively": {
"dirname": "path",
},
"tf.gfile.IsDirectory": {
"dirname": "path",
},
"tf.gfile.ListDirectory": {
"dirname": "path",
},
"tf.gfile.Copy": {
"oldpath": "src",
"newpath": "dst",
},
"tf.gfile.Rename": {
"oldname": "src",
"newname": "dst",
},
"tf.gfile.Walk": {
"in_order": "topdown",
},
"tf.random.stateless_multinomial": {
"output_dtype": "dtype",
},
"tf.string_to_number": {
"string_tensor": "input",
},
"tf.strings.to_number": {
"string_tensor": "input",
},
"tf.string_to_hash_bucket": {
"string_tensor": "input",
},
"tf.strings.to_hash_bucket": {
"string_tensor": "input",
},
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.strings.reduce_join": {
"keep_dims": "keepdims",
"reduction_indices": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis",
},
"tf.nn.weighted_moments": {
"keep_dims": "keepdims"
},
"tf.nn.conv1d": {
"value": "input",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d": {
"filter": "filters",
"use_cudnn_on_gpu": None,
},
"tf.nn.conv2d_backprop_input": {
"use_cudnn_on_gpu": None,
"input_sizes": "output_shape",
"out_backprop": "input",
"filter": "filters",
},
"tf.contrib.summary.audio": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.histogram": {
"tensor": "data",
"family": None,
},
"tf.contrib.summary.image": {
"tensor": "data",
"bad_color": None,
"max_images": "max_outputs",
"family": None,
},
"tf.contrib.summary.scalar": {
"tensor": "data",
"family": None,
},
}
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames, use the OLD function name.
# These renames happen after the arguments have been processed.
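    # For instance (illustrative), "tf.multinomial" is keyed under its old
    # name in function_keyword_renames above even though it is renamed to
    # "tf.random.categorical" below, because argument processing runs before
    # this rename is applied.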
self.manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.compat.v1.batch_gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_image_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.GFile":
"tf.io.gfile.GFile",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Open":
"tf.io.gfile.GFile",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.batch_and_drop_remainder":
"tf.compat.v1.contrib.data.batch_and_drop_remainder",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.padded_batch_and_drop_remainder":
"tf.compat.v1.contrib.data.padded_batch_and_drop_remainder",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.read_batch_features":
"tf.compat.v1.contrib.data.read_batch_features",
"tf.contrib.data.reduce_dataset":
"tf.compat.v1.contrib.data.reduce_dataset",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.sliding_window_batch":
"tf.compat.v1.contrib.data.sliding_window_batch",
"tf.contrib.data.sloppy_interleave":
"tf.compat.v1.contrib.data.sloppy_interleave",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.estimator.make_early_stopping_hook":
"tf.estimator.experimental.make_early_stopping_hook",
"tf.contrib.estimator.stop_if_higher_hook":
"tf.estimator.experimental.stop_if_higher_hook",
"tf.contrib.estimator.stop_if_lower_hook":
"tf.estimator.experimental.stop_if_lower_hook",
"tf.contrib.estimator.stop_if_no_decrease_hook":
"tf.estimator.experimental.stop_if_no_decrease_hook",
"tf.contrib.estimator.stop_if_no_increase_hook":
"tf.estimator.experimental.stop_if_no_increase_hook",
"tf.contrib.framework.CriticalSection":
"tf.CriticalSection",
"tf.contrib.framework.is_tensor":
"tf.is_tensor",
"tf.contrib.framework.nest.assert_same_structure":
"tf.nest.assert_same_structure",
"tf.contrib.framework.nest.flatten":
"tf.nest.flatten",
"tf.contrib.framework.nest.is_sequence":
"tf.nest.is_nested",
"tf.contrib.framework.nest.map_structure":
"tf.nest.map_structure",
"tf.contrib.framework.nest.pack_sequence_as":
"tf.nest.pack_sequence_as",
"tf.contrib.util.constant_value":
"tf.get_static_value",
"tf.contrib.saved_model.load_keras_model":
"tf.keras.experimental.load_from_saved_model",
"tf.contrib.saved_model.save_keras_model":
"tf.keras.experimental.export_saved_model",
"tf.contrib.rnn.RNNCell":
"tf.compat.v1.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.rnn.BasicLSTMCell":
"tf.compat.v1.nn.rnn_cell.BasicLSTMCell",
"tf.contrib.rnn.BasicRNNCell":
"tf.compat.v1.nn.rnn_cell.BasicRNNCell",
"tf.contrib.rnn.GRUCell":
"tf.compat.v1.nn.rnn_cell.GRUCell",
"tf.contrib.rnn.LSTMCell":
"tf.compat.v1.nn.rnn_cell.LSTMCell",
"tf.contrib.rnn.MultiRNNCell":
"tf.compat.v1.nn.rnn_cell.MultiRNNCell",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.contrib.summary.audio":
"tf.compat.v2.summary.audio",
"tf.contrib.summary.histogram":
"tf.compat.v2.summary.histogram",
"tf.contrib.summary.image":
"tf.compat.v2.summary.image",
"tf.contrib.summary.initialize":
"tf.compat.v1.summary.initialize",
"tf.contrib.summary.scalar":
"tf.compat.v2.summary.scalar",
"tf.count_nonzero":
"tf.math.count_nonzero",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.pywrap_tensorflow":
"tf.compat.v1.pywrap_tensorflow",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
# tf.saved_model.load in 1.x has no equivalent in 2.x, but there is a
# symbol with the same name.
"tf.saved_model.load":
"tf.compat.v1.saved_model.load",
"tf.saved_model.load_v2":
"tf.compat.v2.saved_model.load",
"tf.zeros_initializer":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.initializers.truncated_normal",
"tf.image.resize_images":
"tf.image.resize",
"tf.random_poisson":
"tf.random.poisson",
"tf.debugging.assert_greater":
"tf.compat.v1.debugging.assert_greater",
"tf.debugging.assert_greater_equal":
"tf.compat.v1.debugging.assert_greater_equal",
"tf.debugging.assert_integer":
"tf.compat.v1.debugging.assert_integer",
"tf.debugging.assert_less":
"tf.compat.v1.debugging.assert_less",
"tf.debugging.assert_less_equal":
"tf.compat.v1.debugging.assert_less_equal",
"tf.debugging.assert_near":
"tf.compat.v1.debugging.assert_near",
"tf.debugging.assert_negative":
"tf.compat.v1.debugging.assert_negative",
"tf.debugging.assert_non_negative":
"tf.compat.v1.debugging.assert_non_negative",
"tf.debugging.assert_non_positive":
"tf.compat.v1.debugging.assert_non_positive",
"tf.debugging.assert_none_equal":
"tf.compat.v1.debugging.assert_none_equal",
"tf.debugging.assert_type":
"tf.compat.v1.debugging.assert_type",
"tf.debugging.assert_positive":
"tf.compat.v1.debugging.assert_positive",
"tf.debugging.assert_equal":
"tf.compat.v1.debugging.assert_equal",
"tf.debugging.assert_scalar":
"tf.compat.v1.debugging.assert_scalar",
"tf.assert_equal":
"tf.compat.v1.assert_equal",
"tf.assert_less":
"tf.compat.v1.assert_less",
"tf.assert_greater":
"tf.compat.v1.assert_greater",
"tf.debugging.assert_rank":
"tf.compat.v1.debugging.assert_rank",
"tf.debugging.assert_rank_at_least":
"tf.compat.v1.debugging.assert_rank_at_least",
"tf.debugging.assert_rank_in":
"tf.compat.v1.debugging.assert_rank_in",
"tf.assert_rank":
"tf.compat.v1.assert_rank",
"tf.nn.max_pool":
"tf.nn.max_pool2d",
"tf.nn.avg_pool":
"tf.nn.avg_pool2d",
"tf.keras.initializers.zeros":
"tf.compat.v1.keras.initializers.zeros",
"tf.keras.initializers.ones":
"tf.compat.v1.keras.initializers.ones",
"tf.keras.initializers.constant":
"tf.compat.v1.keras.initializers.constant",
"tf.data.experimental.map_and_batch_with_legacy_function":
"tf.compat.v1.data.experimental.map_and_batch_with_legacy_function",
"tf.nn.conv2d_backprop_input":
"tf.nn.conv2d_transpose",
"tf.test.compute_gradient":
"tf.compat.v1.test.compute_gradient",
}
# pylint: enable=line-too-long
# Mapping from function to the new name of the function
self.symbol_renames = renames_v2.renames
self.symbol_renames.update(self.manual_symbol_renames)
self.symbol_renames = {
name: new_name
for name, new_name in self.symbol_renames.items()
}
# Variables that should be changed to functions.
self.change_to_function = {}
# pylint: disable=line-too-long
# This list should just contain names of functions that had
# their arguments reordered. After adding a function name to the list
# run the following to update reorders_v2.py:
# bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
# bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
# pylint: enable=line-too-long
self.reordered_function_names = {
"tf.io.serialize_sparse",
"tf.io.serialize_many_sparse",
"tf.argmax",
"tf.argmin",
"tf.batch_to_space",
"tf.cond",
"tf.nn.space_to_batch",
"tf.boolean_mask",
"tf.convert_to_tensor",
"tf.nn.conv1d",
"tf.nn.conv2d",
"tf.nn.conv2d_backprop_input",
"tf.nn.ctc_beam_search_decoder",
"tf.nn.moments",
"tf.nn.convolution",
"tf.nn.crelu",
"tf.nn.weighted_moments",
"tf.nn.pool",
"tf.nn.separable_conv2d",
"tf.nn.depthwise_conv2d",
"tf.multinomial",
"tf.random.multinomial",
"tf.pad",
"tf.quantize_v2",
"tf.feature_column.categorical_column_with_vocabulary_file",
"tf.shape",
"tf.size",
"tf.random.poisson",
"tf.sparse.add",
"tf.sparse_add",
"tf.sparse.concat",
"tf.sparse_concat",
"tf.sparse.segment_mean",
"tf.sparse.segment_sqrt_n",
"tf.sparse.segment_sum",
"tf.sparse_matmul",
"tf.sparse.reduce_max",
"tf.sparse_reduce_max",
"tf.io.decode_csv",
"tf.strings.length",
"tf.strings.reduce_join",
"tf.strings.substr",
"tf.substr",
"tf.transpose",
"tf.tuple",
"tf.parse_example",
"tf.parse_single_example",
"tf.io.parse_example",
"tf.io.parse_single_example",
"tf.while_loop",
"tf.reduce_all",
"tf.math.reduce_all",
"tf.reduce_any",
"tf.math.reduce_any",
"tf.reduce_min",
"tf.math.reduce_min",
"tf.reduce_max",
"tf.math.reduce_max",
"tf.reduce_sum",
"tf.math.reduce_sum",
"tf.reduce_mean",
"tf.math.reduce_mean",
"tf.reduce_prod",
"tf.math.reduce_prod",
"tf.reduce_logsumexp",
"tf.math.reduce_logsumexp",
"tf.reduce_join",
"tf.confusion_matrix",
"tf.math.confusion_matrix",
"tf.math.in_top_k",
"tf.nn.depth_to_space",
"tf.nn.embedding_lookup",
"tf.nn.embedding_lookup_sparse",
"tf.nn.in_top_k",
"tf.nn.space_to_depth",
"tf.test.assert_equal_graph_def",
"tf.linalg.norm",
"tf.norm",
"tf.reverse_sequence",
"tf.sparse_split",
# tf.nn.softmax_cross_entropy_with_logits *must* be called with
# keyword arguments. Add keyword arguments in rare case when they
# are not specified.
"tf.nn.softmax_cross_entropy_with_logits",
"tf.nn.fractional_avg_pool",
"tf.nn.fractional_max_pool",
"tf.image.sample_distorted_bounding_box",
"tf.gradients",
"tf.hessians",
"tf.nn.max_pool",
"tf.nn.avg_pool",
"tf.estimator.LinearClassifier",
"tf.estimator.LinearRegressor",
"tf.estimator.DNNLinearCombinedClassifier",
"tf.estimator.DNNLinearCombinedRegressor",
"tf.estimator.DNNRegressor",
"tf.estimator.DNNClassifier",
"tf.estimator.BaselineClassifier",
"tf.estimator.BaselineRegressor",
"tf.initializers.uniform_unit_scaling",
"tf.uniform_unit_scaling_initializer",
}
# Manual mapping of function names to be reordered to their list of argument
# names, in order. Only use this if argument names cannot be autodetected,
# e.g. if the functions are in contrib.
self.manual_function_reorders = {
"tf.contrib.summary.audio": [
"name", "tensor", "sample_rate", "max_outputs", "family", "step"],
"tf.contrib.summary.histogram": [
"name", "tensor", "family", "step"],
"tf.contrib.summary.image": [
"name", "tensor", "bad_color", "max_images", "family", "step"],
"tf.contrib.summary.scalar": [
"name", "tensor", "family", "step"],
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
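    # Illustrative example of this rewrite, assuming the v1 argument order
    # recorded in reorders_v2 for tf.argmax (input, axis, ...):
    #   tf.argmax(t, 1)  ->  tf.argmax(input=t, axis=1)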
self.function_reorders = dict(reorders_v2.reorders)
self.function_reorders.update(self.manual_function_reorders)
contrib_warning = (
ast_edits.ERROR,
"<function name> cannot be converted automatically. tf.contrib will not"
" be distributed with TensorFlow 2.0, please consider an alternative in"
" non-contrib TensorFlow, a community-maintained repository, or fork "
"the required code."
)
flags_warning = (
ast_edits.ERROR,
"tf.flags has been removed, please use the argparse or absl"
" modules if you need command line parsing.")
decay_function_comment = (
ast_edits.INFO,
"To use learning rate decay schedules with TensorFlow 2.0, switch to "
"the schedules in `tf.keras.optimizers.schedules`.\n"
)
assert_return_type_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, the "
"data argument has been removed, and arguments have been reordered."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
assert_rank_comment = (
ast_edits.INFO,
"<function name> has been changed to return None, and"
" the data and summarize arguments have been removed."
"\nThe calls have been converted to compat.v1 for safety (even though "
" they may already have been correct)."
)
initializers_no_dtype_comment = (
ast_edits.INFO,
"Initializers no longer have the "
"dtype argument in the constructor or partition_info argument in the "
"__call__ method.\nThe calls have been converted to compat.v1 for"
"safety (even though they may already have been correct).")
metrics_comment = (
ast_edits.INFO,
"tf.metrics have been replaced with object oriented versions in"
" TF 2.0 and after. The metric function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
losses_comment = (
ast_edits.INFO,
"tf.losses have been replaced with object oriented versions in"
" TF 2.0 and after. The loss function calls have been converted to "
"compat.v1 for backward compatibility. Please update these calls to "
"the TF 2.0 versions.")
# This could be done with a _rename_if_arg_not_found_transformer
deprecate_partition_strategy_comment = (
ast_edits.WARNING,
"`partition_strategy` has been removed from <function name>. "
" The 'div' strategy will be used by default.")
# make change instead
uniform_unit_scaling_initializer_comment = (
ast_edits.ERROR,
"uniform_unit_scaling_initializer has been removed. Please use"
" tf.initializers.variance_scaling instead with distribution=uniform "
"to get equivalent behaviour.")
# Make change instead (issue warning about strip_...)
export_saved_model_renamed = (
ast_edits.ERROR,
"(Manual edit required) Please rename the method export_savedmodel() "
"to export_saved_model(). Two things to note:\n\t(1) The argument "
"strip_default_attributes has been removed. The function will always "
"strip the default attributes from ops. If this breaks your code, "
"please switch to tf.compat.v1.estimator.Estimator.\n\t(2) This change "
"only effects core estimator. If you are using "
"tf.contrib.learn.Estimator, please switch to using core estimator.")
# TODO(b/124529441): if possible eliminate need for manual checking.
contrib_summary_comment = (
ast_edits.WARNING,
"(Manual check required) tf.contrib.summary.* functions have been "
"migrated best-effort to tf.compat.v2.summary.* equivalents where "
"possible, but the resulting code may not always work. Please check "
"manually; you can report migration failures on b/124529441.")
# Function warnings. <function name> placeholder inside warnings will be
# replaced by function name.
# You can use *. to add items which do not check the FQN, and apply to e.g.,
# methods.
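    # For example, the "*.export_savedmodel" key below matches a method call
    # named export_savedmodel on any object, e.g. (illustrative)
    # estimator.export_savedmodel(...), not just a fully-qualified tf.* name.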
self.function_warnings = {
"*.export_savedmodel":
export_saved_model_renamed,
"tf.assert_equal":
assert_return_type_comment,
"tf.assert_none_equal":
assert_return_type_comment,
"tf.assert_negative":
assert_return_type_comment,
"tf.assert_positive":
assert_return_type_comment,
"tf.assert_non_negative":
assert_return_type_comment,
"tf.assert_non_positive":
assert_return_type_comment,
"tf.assert_near":
assert_return_type_comment,
"tf.assert_less":
assert_return_type_comment,
"tf.assert_less_equal":
assert_return_type_comment,
"tf.assert_greater":
assert_return_type_comment,
"tf.assert_greater_equal":
assert_return_type_comment,
"tf.assert_integer":
assert_return_type_comment,
"tf.assert_type":
assert_return_type_comment,
"tf.assert_scalar":
assert_return_type_comment,
"tf.assert_rank":
assert_rank_comment,
"tf.assert_rank_at_least":
assert_rank_comment,
"tf.assert_rank_in":
assert_rank_comment,
"tf.contrib.summary.audio":
contrib_summary_comment,
"tf.contrib.summary.histogram":
contrib_summary_comment,
"tf.contrib.summary.image":
contrib_summary_comment,
"tf.contrib.summary.scalar":
contrib_summary_comment,
"tf.debugging.assert_equal":
assert_return_type_comment,
"tf.debugging.assert_greater":
assert_return_type_comment,
"tf.debugging.assert_greater_equal":
assert_return_type_comment,
"tf.debugging.assert_integer":
assert_return_type_comment,
"tf.debugging.assert_less":
assert_return_type_comment,
"tf.debugging.assert_less_equal":
assert_return_type_comment,
"tf.debugging.assert_near":
assert_return_type_comment,
"tf.debugging.assert_negative":
assert_return_type_comment,
"tf.debugging.assert_non_negative":
assert_return_type_comment,
"tf.debugging.assert_non_positive":
assert_return_type_comment,
"tf.debugging.assert_none_equal":
assert_return_type_comment,
"tf.debugging.assert_positive":
assert_return_type_comment,
"tf.debugging.assert_type":
assert_return_type_comment,
"tf.debugging.assert_scalar":
assert_return_type_comment,
"tf.debugging.assert_rank":
assert_rank_comment,
"tf.debugging.assert_rank_at_least":
assert_rank_comment,
"tf.debugging.assert_rank_in":
assert_rank_comment,
"tf.train.exponential_decay":
decay_function_comment,
"tf.train.piecewise_constant_decay":
decay_function_comment,
"tf.train.polynomial_decay":
decay_function_comment,
"tf.train.natural_exp_decay":
decay_function_comment,
"tf.train.inverse_time_decay":
decay_function_comment,
"tf.train.cosine_decay":
decay_function_comment,
"tf.train.cosine_decay_restarts":
decay_function_comment,
"tf.train.linear_cosine_decay":
decay_function_comment,
"tf.train.noisy_linear_cosine_decay":
decay_function_comment,
"tf.nn.embedding_lookup":
deprecate_partition_strategy_comment,
"tf.nn.embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.nce_loss":
deprecate_partition_strategy_comment,
"tf.nn.safe_embedding_lookup_sparse":
deprecate_partition_strategy_comment,
"tf.nn.sampled_softmax_loss":
deprecate_partition_strategy_comment,
"tf.keras.initializers.Zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.zeros":
initializers_no_dtype_comment,
"tf.keras.initializers.Ones":
initializers_no_dtype_comment,
"tf.keras.initializers.ones":
initializers_no_dtype_comment,
"tf.keras.initializers.Constant":
initializers_no_dtype_comment,
"tf.keras.initializers.constant":
initializers_no_dtype_comment,
"tf.keras.initializers.VarianceScaling":
initializers_no_dtype_comment,
"tf.keras.initializers.Orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.keras.initializers.Identity":
initializers_no_dtype_comment,
"tf.keras.initializers.identity":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.keras.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.initializers.zeros":
initializers_no_dtype_comment,
"tf.zeros_initializer":
initializers_no_dtype_comment,
"tf.initializers.ones":
initializers_no_dtype_comment,
"tf.ones_initializer":
initializers_no_dtype_comment,
"tf.initializers.constant":
initializers_no_dtype_comment,
"tf.constant_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_uniform":
initializers_no_dtype_comment,
"tf.random_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.random_normal":
initializers_no_dtype_comment,
"tf.random_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.truncated_normal":
initializers_no_dtype_comment,
"tf.truncated_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.variance_scaling":
initializers_no_dtype_comment,
"tf.variance_scaling_initializer":
initializers_no_dtype_comment,
"tf.initializers.orthogonal":
initializers_no_dtype_comment,
"tf.orthogonal_initializer":
initializers_no_dtype_comment,
"tf.initializers.identity":
initializers_no_dtype_comment,
"tf.glorot_uniform_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_uniform":
initializers_no_dtype_comment,
"tf.glorot_normal_initializer":
initializers_no_dtype_comment,
"tf.initializers.glorot_normal":
initializers_no_dtype_comment,
"tf.losses.absolute_difference":
losses_comment,
"tf.losses.add_loss":
losses_comment,
"tf.losses.compute_weighted_loss":
losses_comment,
"tf.losses.cosine_distance":
losses_comment,
"tf.losses.get_losses":
losses_comment,
"tf.losses.get_regularization_loss":
losses_comment,
"tf.losses.get_regularization_losses":
losses_comment,
"tf.losses.get_total_loss":
losses_comment,
"tf.losses.hinge_loss":
losses_comment,
"tf.losses.huber_loss":
losses_comment,
"tf.losses.log_loss":
losses_comment,
"tf.losses.mean_pairwise_squared_error":
losses_comment,
"tf.losses.mean_squared_error":
losses_comment,
"tf.losses.sigmoid_cross_entropy":
losses_comment,
"tf.losses.softmax_cross_entropy":
losses_comment,
"tf.losses.sparse_softmax_cross_entropy":
losses_comment,
"tf.metrics.accuracy":
metrics_comment,
"tf.metrics.auc":
metrics_comment,
"tf.metrics.average_precision_at_k":
metrics_comment,
"tf.metrics.false_negatives":
metrics_comment,
"tf.metrics.false_negatives_at_thresholds":
metrics_comment,
"tf.metrics.false_positives":
metrics_comment,
"tf.metrics.false_positives_at_thresholds":
metrics_comment,
"tf.metrics.mean":
metrics_comment,
"tf.metrics.mean_absolute_error":
metrics_comment,
"tf.metrics.mean_cosine_distance":
metrics_comment,
"tf.metrics.mean_iou":
metrics_comment,
"tf.metrics.mean_per_class_accuracy":
metrics_comment,
"tf.metrics.mean_relative_error":
metrics_comment,
"tf.metrics.mean_squared_error":
metrics_comment,
"tf.metrics.mean_tensor":
metrics_comment,
"tf.metrics.percentage_below":
metrics_comment,
"tf.metrics.precision":
metrics_comment,
"tf.metrics.precision_at_k":
metrics_comment,
"tf.metrics.precision_at_thresholds":
metrics_comment,
"tf.metrics.precision_at_top_k":
metrics_comment,
"tf.metrics.recall":
metrics_comment,
"tf.metrics.recall_at_k":
metrics_comment,
"tf.metrics.recall_at_thresholds":
metrics_comment,
"tf.metrics.recall_at_top_k":
metrics_comment,
"tf.metrics.root_mean_squared_error":
metrics_comment,
"tf.metrics.sensitivity_at_specificity":
metrics_comment,
"tf.metrics.sparse_average_precision_at_k":
metrics_comment,
"tf.metrics.sparse_precision_at_k":
metrics_comment,
"tf.metrics.specificity_at_sensitivity":
metrics_comment,
"tf.metrics.true_negatives":
metrics_comment,
"tf.metrics.true_negatives_at_thresholds":
metrics_comment,
"tf.metrics.true_positives":
metrics_comment,
"tf.metrics.true_positives_at_thresholds":
metrics_comment,
"tf.get_variable":
(ast_edits.WARNING,
"<function name> returns ResourceVariables by default in 2.0, "
"which have well-defined semantics and are stricter about shapes. "
"You can disable this behavior by passing use_resource=False, or "
"by calling tf.compat.v1.disable_resource_variables().")
}
# Warnings that are emitted only if a specific arg is found.
self.function_arg_warnings = {
"tf.nn.conv1d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d": {
("use_cudnn_on_gpu", 4): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_filter": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.nn.conv2d_backprop_input": {
("use_cudnn_on_gpu", 5): (
ast_edits.WARNING,
"use_cudnn_on_gpu has been removed, behavior is now equivalent"
"to setting it to True."),
},
"tf.gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"tf.gradients no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.minimize": {
("colocate_gradients_with_ops", 5): (
ast_edits.INFO,
"Optimizer.minimize no longer takes "
"'colocate_gradients_with_ops' argument, it behaves as if it "
"was set to True."),
},
"*.compute_gradients": {
("colocate_gradients_with_ops", 4): (
ast_edits.INFO,
"Optimizer.compute_gradients no "
"longer takes 'colocate_gradients_with_ops' argument, it "
"behaves as if it was set to True."),
},
"tf.cond": {
("strict", 3): (
ast_edits.WARNING,
"tf.cond no longer takes 'strict' argument, it behaves as "
"if was set to True.")
},
"tf.contrib.summary.audio": {
("family", 4): (
ast_edits.WARNING,
"tf.contrib.summary.* functions no longer take the 'family' "
"argument; instead name scoping should be used. This call site "
"specifies a family argument so it cannot be converted safely.")
},
"tf.contrib.summary.histogram": {
("family", 2): (
ast_edits.WARNING,
"tf.contrib.summary.* functions no longer take the 'family' "
"argument; instead name scoping should be used. This call site "
"specifies a family argument so it cannot be converted safely.")
},
"tf.contrib.summary.image": {
("bad_color", 2): (
ast_edits.WARNING,
"tf.contrib.summary.image no longer takes the 'bad_color' "
"argument; caller must now preprocess if needed. This call "
"site specifies a bad_color argument so it cannot be converted "
"safely."),
("family", 4): (
ast_edits.WARNING,
"tf.contrib.summary.* functions no longer take the 'family' "
"argument; instead name scoping should be used. This call site "
"specifies a family argument so it cannot be converted safely.")
},
"tf.contrib.summary.scalar": {
("family", 2): (
ast_edits.WARNING,
"tf.contrib.summary.* functions no longer take the 'family' "
"argument; instead name scoping should be used. This call site "
"specifies a family argument so it cannot be converted safely.")
},
"tf.image.resize": {
("align_corners",
3): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize."),
},
"tf.image.resize_bilinear": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bilinear."),
},
"tf.image.resize_area": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_area."),
},
"tf.image.resize_bicubic": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_bicubic."),
},
"tf.image.resize_nearest_neighbor": {
("align_corners",
2): (ast_edits.WARNING,
"align_corners is not supported by tf.image.resize, the new "
"default transformation is close to what v1 provided. If you "
"require exactly the same transformation as before, use "
"compat.v1.image.resize_nearest_neighbor."),
},
}
# Specially handled functions
# Each transformer is a callable which will be called with the arguments
# transformer(parent, node, full_name, name, logs)
# Where logs is a list to which (level, line, col, msg) tuples can be
# appended, full_name is the FQN of the function called (or None if that is
    # unknown), name is the name of the function called (or None if that is
# unknown). node is an ast.Call node representing this function call, and
# parent is its parent in the AST.
# The function may modify node (but not parent), and must return
    # - None, if nothing was modified
# - node, if node was modified in place (make sure to use
# pasta.ast_utils.replace_child to swap out children, otherwise formatting
# may get messy)
# - a replacement for node, if the whole call node was replaced. The caller
# will take care of changing parent.
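    # A minimal sketch of a transformer obeying this contract (hypothetical;
    # it is not registered below). It only logs and leaves the call untouched:
    #
    #   def _noop_transformer(parent, node, full_name, name, logs):
    #     logs.append((ast_edits.INFO, node.lineno, node.col_offset,
    #                  "Visited %s" % (full_name or name)))
    #     return None  # nothing was modified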
self.function_transformers = {
"*.make_initializable_iterator": _iterator_transformer,
"*.make_one_shot_iterator": _iterator_transformer,
"tf.nn.dropout": _dropout_transformer,
"tf.to_bfloat16": _cast_transformer,
"tf.to_complex128": _cast_transformer,
"tf.to_complex64": _cast_transformer,
"tf.to_double": _cast_transformer,
"tf.to_float": _cast_transformer,
"tf.to_int32": _cast_transformer,
"tf.to_int64": _cast_transformer,
"tf.nn.softmax_cross_entropy_with_logits":
_softmax_cross_entropy_with_logits_transformer,
"tf.image.extract_glimpse": _extract_glimpse_transformer,
"tf.image.resize_area": _image_resize_transformer,
"tf.image.resize_bicubic": _image_resize_transformer,
"tf.image.resize_bilinear": _image_resize_transformer,
"tf.image.resize_nearest_neighbor": _image_resize_transformer,
"tf.nn.fractional_avg_pool": _pool_seed_transformer,
"tf.nn.fractional_max_pool": _pool_seed_transformer,
"tf.name_scope": _name_scope_transformer,
"tf.device": functools.partial(
_rename_if_arg_found_transformer, arg_name="device_name",
arg_ok_predicate=_is_ast_str, remove_if_ok=False,
message="tf.device no longer takes functions as an argument. "
"We could not determine that the argument value is a string, so "
"the call was converted to compat.v1."),
"tf.zeros_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.zeros_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.ones_like": functools.partial(
_rename_if_arg_found_transformer, arg_name="optimize",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.ones_like no longer takes an optimize argument, and "
"behaves as if optimize=True. This call site specifies something "
"other than optimize=True, so it was converted to compat.v1."),
"tf.while_loop": functools.partial(
_rename_if_arg_found_transformer,
arg_name="return_same_structure",
arg_ok_predicate=_is_ast_true, remove_if_ok=True,
message="tf.while_loop no longer takes 'return_same_structure' "
"argument and behaves as if return_same_structure=True. This call "
"site specifies something other than return_same_structure=True, "
"so it was converted to compat.v1."),
"tf.nn.ctc_beam_search_decoder": functools.partial(
_rename_if_arg_found_transformer,
arg_name="merge_repeated",
arg_ok_predicate=_is_ast_false, remove_if_ok=True,
message="tf.nn.ctc_beam_search_decoder no longer takes the "
"'merge_repeated' argument and behaves as if merge_repeated=False. "
"This call site specifies something other than "
"merge_repeated=False, so it was converted to compat.v1."),
"tf.nn.erosion2d": functools.partial(
_add_argument_transformer,
arg_name="data_format",
arg_value_ast=ast.Str("NHWC")),
"tf.contrib.summary.audio": _add_summary_step_transformer,
"tf.contrib.summary.histogram": _add_summary_step_transformer,
"tf.contrib.summary.image": _add_summary_step_transformer,
"tf.contrib.summary.scalar": _add_summary_step_transformer,
"tf.estimator.LinearClassifier": _add_loss_reduction_transformer,
"tf.estimator.LinearRegressor": _add_loss_reduction_transformer,
"tf.estimator.DNNLinearCombinedClassifier":
_add_loss_reduction_transformer,
"tf.estimator.DNNLinearCombinedRegressor":
_add_loss_reduction_transformer,
"tf.estimator.DNNRegressor": _add_loss_reduction_transformer,
"tf.estimator.DNNClassifier": _add_loss_reduction_transformer,
"tf.estimator.BaselineClassifier": _add_loss_reduction_transformer,
"tf.estimator.BaselineRegressor": _add_loss_reduction_transformer,
"tf.initializers.uniform_unit_scaling":
_add_uniform_scaling_initializer_transformer,
"tf.uniform_unit_scaling_initializer":
_add_uniform_scaling_initializer_transformer,
}
self.module_deprecations = {
"tf.contrib": contrib_warning,
"tf.flags": flags_warning,
}
def _is_ast_str(node):
"""Determine whether this node represents a string."""
allowed_types = [ast.Str]
if hasattr(ast, "Bytes"):
allowed_types += [ast.Bytes]
if hasattr(ast, "JoinedStr"):
allowed_types += [ast.JoinedStr]
if hasattr(ast, "FormattedValue"):
allowed_types += [ast.FormattedValue]
return isinstance(node, allowed_types)
def _is_ast_true(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is True
else:
return isinstance(node, ast.Name) and node.id == "True"
def _is_ast_false(node):
if hasattr(ast, "NameConstant"):
return isinstance(node, ast.NameConstant) and node.value is False
else:
return isinstance(node, ast.Name) and node.id == "False"
# Lots of unused arguments below, since these are called in a standard manner.
# pylint: disable=unused-argument
def _rename_if_arg_found_transformer(parent, node, full_name, name, logs,
arg_name=None,
arg_ok_predicate=None,
remove_if_ok=False,
message=None):
"""Replaces the given call with tf.compat.v1 if the given arg is found.
This requires the function to be called with all named args, so for using
this transformer, the function should also be added to renames.
If the arg is not found, the call site is left alone.
If the arg is found, and if arg_ok_predicate is given, it is called with
the ast Expression representing the argument value found. If it returns
True, the function is left alone.
If the arg is found, arg_ok_predicate is not None and returns ok, and
remove_if_ok is True, the argument is removed from the call.
Otherwise, `compat.v1` is inserted between tf and the function name.
Args:
parent: Parent of node.
node: ast.Call node to maybe modify.
full_name: full name of function to modify
name: name of function to modify
logs: list of logs to append to
arg_name: name of the argument to look for
arg_ok_predicate: predicate callable with the ast of the argument value,
returns whether the argument value is allowed.
remove_if_ok: remove the argument if present and ok as determined by
arg_ok_predicate.
message: message to print if a non-ok arg is found (and hence, the function
is renamed to its compat.v1 version).
Returns:
node, if it was modified, else None.
"""
# Check whether arg is there.
arg_present, arg_value = ast_edits.get_arg_value(node, arg_name)
if not arg_present:
return
# Check whether arg is problematic (and if not, maybe remove it).
if arg_ok_predicate and arg_ok_predicate(arg_value):
if remove_if_ok:
for i, kw in enumerate(node.keywords):
if kw.arg == arg_name:
node.keywords.pop(i)
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Removed argument %s for function %s" % (
arg_name, full_name or name)))
break
return node
else:
return
# All conditions met, insert v1 and log what we did.
# We must have a full name, so the func is an attribute.
new_name = full_name.replace("tf.", "tf.compat.v1.", 1)
node.func = ast_edits.full_name_node(new_name)
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Renaming %s to %s because argument %s is present. %s" %
(full_name, new_name, arg_name, message if message is not None else "")
))
return node
def _add_argument_transformer(parent, node, full_name, name, logs,
arg_name, arg_value_ast):
"""Adds an argument (as a final kwarg arg_name=arg_value_ast)."""
node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1],
full_name or name))
))
return node
def _iterator_transformer(parent, node, full_name, name, logs):
"""Transform iterator methods to compat function calls."""
# First, check that node.func.value is not already something we like
# (tf.compat.v1.data), or something which is handled in the rename
# (tf.data). This transformer only handles the method call to function call
# conversion.
if full_name and (full_name.startswith("tf.compat.v1.data") or
full_name.startswith("tf.data")):
return
# This should never happen, since we're only called for Attribute nodes.
if not isinstance(node.func, ast.Attribute):
return
# Transform from x.f(y) to tf.compat.v1.data.f(x, y)
# Fortunately, node.func.value should already have valid position info
node.args = [node.func.value] + node.args
node.func.value = ast_edits.full_name_node("tf.compat.v1.data")
logs.append((ast_edits.WARNING, node.lineno, node.col_offset,
"Changing dataset.%s() to tf.compat.v1.data.%s(dataset). "
"Please check this transformation.\n" % (name, name)))
return node
def _dropout_transformer(parent, node, full_name, name, logs):
"""Replace keep_prob with 1-rate."""
def _replace_keep_prob_node(parent, old_value):
"""Replaces old_value with 1-(old_value)."""
one = ast.Num(n=1)
one.lineno = 0
one.col_offset = 0
new_value = ast.BinOp(left=one, op=ast.Sub(),
right=old_value)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around keep_prob.value (and remove the old prefix/
# suffix, they should only be around new_value).
pasta.base.formatting.set(old_value, "prefix", "(")
pasta.base.formatting.set(old_value, "suffix", ")")
# Check if we have a keep_prob keyword arg
for keep_prob in node.keywords:
if keep_prob.arg == "keep_prob":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate\n"))
keep_prob.arg = "rate"
_replace_keep_prob_node(keep_prob, keep_prob.value)
return node
# Maybe it was a positional arg
if len(node.args) < 2:
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"tf.nn.dropout called without arguments, so "
"automatic fix was disabled. tf.nn.dropout has changed "
"the semantics of the second argument."))
else:
_replace_keep_prob_node(node, node.args[1])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing keep_prob arg of tf.nn.dropout to rate, and "
"recomputing value.\n"))
return node
def _cast_transformer(parent, node, full_name, name, logs):
"""Transforms to_int and to_float to cast(..., dtype=...)."""
# Find out the dtype to cast to from the function name
dtype_str = name[3:]
# Special cases where the full dtype is not given
if dtype_str == "float":
dtype_str = "float32"
elif dtype_str == "double":
dtype_str = "float64"
new_arg = ast.keyword(arg="dtype",
value=ast.Attribute(value=ast.Name(id="tf",
ctx=ast.Load()),
attr=dtype_str, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 2:
name_arg = ast.keyword(arg="name",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(name_arg)
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "cast"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "cast"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
dtype_str)))
return node
def _softmax_cross_entropy_with_logits_transformer(
parent, node, full_name, name, logs):
"""Wrap labels argument with stop_gradients."""
def _wrap_label(parent, old_value):
"""Wrap labels with tf.stop_gradient."""
already_stop_grad = (isinstance(old_value, ast.Call) and
isinstance(old_value.func, ast.Attribute) and
old_value.func.attr == "stop_gradient" and
isinstance(old_value.func.value, ast.Name) and
old_value.func.value.id == "tf")
if already_stop_grad:
return False
try:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [])
except TypeError:
new_value = ast.Call(
ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
[old_value], [], None, None)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
return True
# Check if we have a labels keyword arg
for karg in node.keywords:
if karg.arg == "labels":
if _wrap_label(karg, karg.value):
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing labels arg of "
"tf.nn.softmax_cross_entropy_with_logits to "
"tf.stop_gradient(labels). Please check this "
"transformation.\n"))
return node
return node
def _image_resize_transformer(parent, node, full_name, name, logs):
"""Transforms image.resize_* to image.resize(..., method=*, ...)."""
resize_method = name[7:].upper()
new_arg = ast.keyword(arg="method",
value=ast.Attribute(
value=ast.Attribute(
value=ast.Attribute(
value=ast.Name(id="tf", ctx=ast.Load()),
attr="image", ctx=ast.Load()),
attr="ResizeMethod", ctx=ast.Load()),
attr=resize_method, ctx=ast.Load()))
# Ensures a valid transformation when a positional name arg is given
if len(node.args) == 4:
pos_arg = ast.keyword(arg="preserve_aspect_ratio",
value=node.args[-1])
node.args = node.args[:-1]
node.keywords.append(pos_arg)
if len(node.args) == 3:
pos_arg = ast.keyword(arg="align_corners",
value=node.args[-1])
node.args = node.args[:-1]
new_keywords = []
for kw in node.keywords:
if kw.arg != "align_corners":
new_keywords.append(kw)
node.keywords = new_keywords
# Python3 ast requires the args for the Attribute, but codegen will mess up
# the arg order if we just set them to 0.
new_arg.value.lineno = node.lineno
new_arg.value.col_offset = node.col_offset+100
node.keywords.append(new_arg)
if isinstance(node.func, ast.Attribute):
node.func.attr = "resize"
else:
assert isinstance(node.func, ast.Name)
node.func.id = "resize"
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changed %s call to tf.image.resize(..., "
"method=tf.image.ResizeMethod.%s)." % (full_name,
resize_method)))
return node
def _pool_seed_transformer(parent, node, full_name, name, logs):
"""Removes seed2 and deterministic, and adds non-zero seed if needed."""
# This requires that this function uses all kwargs (add to renames!).
seed_arg = None
deterministic = False
modified = False
new_keywords = []
for kw in node.keywords:
if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred):
pass
elif kw.arg == "seed":
seed_arg = kw
elif kw.arg == "seed2" or kw.arg == "deterministic":
lineno = getattr(kw, "lineno", node.lineno)
col_offset = getattr(kw, "col_offset", node.col_offset)
logs.append((ast_edits.INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
kw.arg, full_name or name)))
if kw.arg == "deterministic":
if not _is_ast_false(kw.value):
deterministic = True
modified = True
continue
new_keywords.append(kw)
if deterministic:
if seed_arg is None:
new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42)))
      logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"Adding seed=42 to call to %s since determinism was requested" % (
full_name or name)
))
else:
      logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"The deterministic argument is deprecated for %s, pass a "
"non-zero seed for determinism. The deterministic argument is "
"present, possibly not False, and the seed is already set. The "
"converter cannot determine whether it is nonzero, please check."
))
if modified:
node.keywords = new_keywords
return node
else:
return
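# A hedged illustration, not part of the upstream converter: for a
# deterministic fractional pooling call with no seed, seed2/deterministic are
# dropped and a fixed seed is inserted (argument names below follow TF 1.x).
_POOL_SEED_EXAMPLE = {
    "before": "tf.nn.fractional_max_pool(value=v, pooling_ratio=r, "
              "seed2=7, deterministic=True)",
    "after": "tf.nn.fractional_max_pool(value=v, pooling_ratio=r, seed=42)",
}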
def _extract_glimpse_transformer(parent, node, full_name, name, logs):
def _replace_uniform_noise_node(parent, old_value):
"""Replaces old_value with 'uniform' or 'guassian'."""
uniform = ast.Str(s="uniform")
gaussian = ast.Str(s="gaussian")
new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
# This copies the prefix and suffix on old_value to new_value.
pasta.ast_utils.replace_child(parent, old_value, new_value)
ast.copy_location(new_value, old_value)
# Put parentheses around noise.value.test (and remove the old prefix/
# suffix, they should only be around new_value.test), so that:
# "uniform" if (a if b else c) else "gaussian" is valid.
pasta.base.formatting.set(new_value.test, "prefix", "(")
pasta.base.formatting.set(new_value.test, "suffix", ")")
# Check if we have a uniform_noise keyword arg
for uniform_noise in node.keywords:
if uniform_noise.arg == "uniform_noise":
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse "
"to noise, and recomputing value. Please check this "
"transformation.\n"))
uniform_noise.arg = "noise"
value = "uniform" if uniform_noise.value else "gaussian"
_replace_uniform_noise_node(uniform_noise, uniform_noise.value)
return node
  # Since `noise`/`uniform_noise` is an optional arg, nothing needs to be
  # done unless it was passed positionally, i.e. len(node.args) >= 6.
  if len(node.args) >= 6:
_replace_uniform_noise_node(node, node.args[5])
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Changing uniform_noise arg of tf.image.extract_glimpse to "
"noise, and recomputing value.\n"))
return node
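# A hedged illustration, not part of the upstream converter: the boolean
# uniform_noise argument is rewritten into the string-valued noise argument.
_EXTRACT_GLIMPSE_EXAMPLE = {
    "before": "tf.image.extract_glimpse(x, size, offsets, uniform_noise=u)",
    "after": "tf.image.extract_glimpse(x, size, offsets, "
             "noise='uniform' if (u) else 'gaussian')",
}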
def _add_summary_step_transformer(parent, node, full_name, name, logs):
"""Adds a step argument to the summary API call if not specified.
The inserted argument value is tf.compat.v1.train.get_or_create_global_step().
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "step":
return node
default_value = "tf.compat.v1.train.get_or_create_global_step()"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="step", value=ast_value))
logs.append((
ast_edits.WARNING, node.lineno, node.col_offset,
"Summary API writing function %s now requires a 'step' argument; "
"inserting default of %s." % (full_name or name, default_value)))
return node
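# A hedged illustration, not part of the upstream converter: a summary write
# without an explicit step gains the v1 global step (the call shown here is
# only an assumed example of the affected summary-writing API family).
_SUMMARY_STEP_EXAMPLE = {
    "before": "tf.contrib.summary.scalar('loss', loss)",
    "after": "tf.contrib.summary.scalar('loss', loss, "
             "step=tf.compat.v1.train.get_or_create_global_step())",
}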
def _add_loss_reduction_transformer(parent, node, full_name, name, logs):
"""Adds a loss_reduction argument if not specified.
Default value for tf.estimator.*Classifier and tf.estimator.*Regressor
loss_reduction argument changed to SUM_OVER_BATCH_SIZE. So, we update
existing calls to use the old default value `tf.losses.Reduction.SUM`.
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "loss_reduction":
return node
# TODO(annarev): this should be updated to tf.keras.losses.Reduction.SUM
# once b/125525822 is fixed.
default_value = "tf.compat.v1.losses.Reduction.SUM"
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(default_value)
node.keywords.append(ast.keyword(arg="loss_reduction", value=ast_value))
logs.append((
ast_edits.INFO, node.lineno, node.col_offset,
"%s: Default value of loss_reduction has been changed to "
"SUM_OVER_BATCH_SIZE; inserting old default value %s.\n"
% (full_name or name, default_value)))
return node
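# A hedged illustration, not part of the upstream converter: a canned
# estimator constructor gains an explicit loss_reduction that preserves the
# old default (the estimator shown is only an example of affected symbols).
_LOSS_REDUCTION_EXAMPLE = {
    "before": "tf.estimator.DNNClassifier(feature_columns=cols, "
              "hidden_units=[10])",
    "after": "tf.estimator.DNNClassifier(feature_columns=cols, "
             "hidden_units=[10], "
             "loss_reduction=tf.compat.v1.losses.Reduction.SUM)",
}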
def _add_uniform_scaling_initializer_transformer(
parent, node, full_name, name, logs):
"""Updates references to uniform_unit_scaling_initializer.
Transforms:
tf.uniform_unit_scaling_initializer(factor, seed, dtype) to
tf.keras.initializers.VarianceScaling(
scale=factor, distribution="uniform", seed=seed)
Note: to apply this transformation, symbol must be added
to reordered_function_names above.
"""
for keyword_arg in node.keywords:
if keyword_arg.arg == "factor":
keyword_arg.arg = "scale"
distribution_value = "\"uniform\""
# Parse with pasta instead of ast to avoid emitting a spurious trailing \n.
ast_value = pasta.parse(distribution_value)
node.keywords.append(ast.keyword(arg="distribution", value=ast_value))
lineno = node.func.value.lineno
col_offset = node.func.value.col_offset
node.func.value = ast_edits.full_name_node("tf.keras.initializers")
node.func.value.lineno = lineno
node.func.value.col_offset = col_offset
node.func.attr = "VarianceScaling"
return node
def _name_scope_transformer(parent, node, full_name, name, logs):
"""Fix name scope invocation to use 'default_name' and omit 'values' args."""
name_found, name = ast_edits.get_arg_value(node, "name", 0)
default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1)
# If an actual name was given...
if name_found and pasta.dump(name) != "None":
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "`name` passed to `name_scope`. Because you may be re-entering"
                 " an existing scope, it is not safe to convert automatically;"
                 " the v2 name_scope does not support re-entering scopes by"
                 " name.\n"))
# Rename to compat.v1
new_name = "tf.compat.v1.name_scope"
logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset,
"Renamed %r to %r" % (full_name, new_name)))
new_name_node = ast_edits.full_name_node(new_name, node.func.ctx)
ast.copy_location(new_name_node, node.func)
pasta.ast_utils.replace_child(node, node.func, new_name_node)
return node
if default_found:
# New name scope doesn't have name, but it has a default name. We use
# name=default_name, and values can be dropped (it's only for
# error reporting and useless outside of graph mode).
logs.append((ast_edits.INFO, node.lineno, node.col_offset,
"Using default_name as name in call to name_scope.\n"))
# Remove all args other than name
node.args = []
node.keywords = [ast.keyword(arg="name", value=default_name)]
return node
logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
"name_scope call with neither name nor default_name cannot be "
"converted properly."))
| apache-2.0 |
openaid-IATI/OIPA | OIPA/task_queue/utils.py | 1 | 7272 | import time
import requests
from celery.task.control import inspect
from iati_synchroniser.models import (
AsyncTasksFinished, Dataset, DatasetDownloadsStarted
)
from task_queue.validation import DatasetValidationTask
class Tasks:
"""
The logic: if only one task is running then the task will
be continued to run.
- parent_task: if only this task is running then continued to run
- children_task: if the active task has one or more children_task
then not continue to run the task.
"""
def __init__(self, parent_task, children_tasks):
self.inspect = inspect()
self.parent_task = parent_task
self.children_tasks = children_tasks
def workers(self):
return self.inspect.active()
def list(self):
task_list = [self.parent_task]
task_list.extend(self.children_tasks)
return task_list
def count(self):
count = 0
try:
for worker in self.workers():
for task in self.workers()[worker]:
if task['type'] in self.list():
count += 1
except TypeError:
pass
return count
def active(self):
tasks = list()
try:
for worker in self.workers():
for task in self.workers()[worker]:
if task['type'] in self.list() \
and task['type'] not in tasks:
tasks.append(task['type'])
except TypeError:
pass
return tasks
def is_parent(self):
if self.count() == 1 and self.active() == [self.parent_task]:
return True
return False
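# A hedged usage sketch, not part of the original module: the dotted task
# names below are hypothetical placeholders, not real OIPA task names.
def _tasks_usage_example():
    tasks = Tasks(
        parent_task="task_queue.tasks.example_parent_task",
        children_tasks=["task_queue.tasks.example_child_task"])
    # Continue only when the parent task is the single active task.
    return tasks.is_parent()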
def extract_values(obj, key):
"""Pull all values of specified key from nested JSON."""
arr = []
def extract(obj, arr, key):
"""Recursively search for values of key in JSON tree."""
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, (dict, list)):
extract(v, arr, key)
elif k == key:
arr.append(v)
elif isinstance(obj, list):
for item in obj:
extract(item, arr, key)
return arr
results = extract(obj, arr, key)
return results
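# A hedged example, not part of the original module: extract_values collects
# every value stored under ``key`` anywhere in a nested dict/list structure.
def _extract_values_example():
    sample = {"a": {"b": 1}, "c": [{"b": 2}, {"d": 3}]}
    return extract_values(sample, "b")  # -> [1, 2]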
def reset_automatic_incremental_parse_dbs():
dds = DatasetDownloadsStarted.objects.all()
dds.delete()
ddf = AsyncTasksFinished.objects.all()
ddf.delete()
# Await asynchronous subtasks spawned by other tasks. `started` is the number
# of subtasks that were started, i.e. the number of rows expected to end up in
# the AsyncTasksFinished table before this helper returns.
def await_async_subtasks(started=-1, started_not_set=True):
check_iteration_count = 0
check_iteration_maximum = 3
check_previous_finished_length = 0
check_grace_iteration_count = 0
check_grace_iteration_maximum = 10
check_grace_maximum_disparity = 10
while True:
# Get the size of the started datasets
if not started_not_set:
started = len(DatasetDownloadsStarted.objects.all())
finished = len(AsyncTasksFinished.objects.all())
# Check if the grace should take effect.
# Grace is when the number of failed tasks is very small but the
# number of finished tasks no longer changes. This makes sure that
# the automatic parsing does not get stuck waiting for an unfinished
# async task.
if finished == check_previous_finished_length:
check_grace_iteration_count += 1
if check_grace_iteration_count == check_grace_iteration_maximum:
if started - finished < check_grace_maximum_disparity:
break
else: # More async tasks than expected failed,
# exit automatic parsing
return True
else:
check_grace_iteration_count = 0
# Check if the async tasks are done
if started == finished:
if finished == check_previous_finished_length:
check_iteration_count += 1
if check_iteration_count == check_iteration_maximum:
break
else:
check_iteration_count = 0
# Wait a minute and check again
time.sleep(60)
check_previous_finished_length = finished
# After this while loop finishes, we clear the DatasetDownloads tables
reset_automatic_incremental_parse_dbs()
def automatic_incremental_validation(start_at, check_validation):
# STEP THREE -- DATASET VALIDATION TASK #
# Only execute this step if validation should be active.
if start_at in (1, 2, 3) and check_validation:
# Prepare checks
check_validation_has_started = False
check_validation_is_active = False
check_empty_iteration_count = 0
check_empty_iteration_maximum = 3
while True:
url = "https://iativalidator.iatistandard.org/api/v1/queue/next"
response = requests.get(url, timeout=30)
# If the response is not 200, reset and check back later.
if response.status_code != 200:
check_validation_has_started = False
check_validation_is_active = False
time.sleep(60)
continue
check_content_is_empty = response.content.decode("utf-8") == ""
"""
Case 1: content empty - started = false - active = false
wait for the validator to start
Case 2: content has data - started = false - active = false
set started to true and active to true
Case 3: content has data - started = true - active = true
wait for the content to stop having data!
Case 4: content empty - started = true - active = true
with three iterations, confirm the content is actually empty!
set active to false.
"""
# if check_content_is_empty and not check_validation_has_started and not check_validation_is_active: # NOQA: E501
if not check_content_is_empty and not check_validation_has_started and not check_validation_is_active: # NOQA: E501
check_validation_has_started = True
check_validation_is_active = True
if not check_content_is_empty and check_validation_has_started and check_validation_is_active: # NOQA: E501
check_empty_iteration_count = 0
if check_content_is_empty and check_validation_has_started and check_validation_is_active: # NOQA: E501
if check_empty_iteration_count < check_empty_iteration_maximum:
check_empty_iteration_count += 1
else: # Validation has finished
break
time.sleep(60)
# Now that the "waiting for validator to finish" loop is over, we know
# The validator is finished. Run the task. To reduce complexity, reuse
# the AsyncTasksFinished table.
datasets = Dataset.objects.all()
for dataset in datasets:
DatasetValidationTask.delay(dataset_id=dataset.id)
started = len(Dataset.objects.all())
await_async_subtasks(started)
# STEP THREE -- End #
| agpl-3.0 |
IONISx/edx-platform | setup.py | 11 | 3320 | """
Setup script for the Open edX package.
"""
from setuptools import setup
setup(
name="Open edX",
version="0.5",
install_requires=["setuptools"],
requires=[],
# NOTE: These are not the names we should be installing. This tree should
# be reorganized to be a more conventional Python tree.
packages=[
"openedx.core.djangoapps.course_groups",
"openedx.core.djangoapps.credit",
"openedx.core.djangoapps.user_api",
"lms",
"cms",
],
entry_points={
"openedx.course_tab": [
"ccx = lms.djangoapps.ccx.plugins:CcxCourseTab",
"courseware = lms.djangoapps.courseware.tabs:CoursewareTab",
"course_info = lms.djangoapps.courseware.tabs:CourseInfoTab",
"discussion = lms.djangoapps.django_comment_client.forum.views:DiscussionTab",
"edxnotes = lms.djangoapps.edxnotes.plugins:EdxNotesTab",
"external_discussion = lms.djangoapps.courseware.tabs:ExternalDiscussionCourseTab",
"external_link = lms.djangoapps.courseware.tabs:ExternalLinkCourseTab",
"html_textbooks = lms.djangoapps.courseware.tabs:HtmlTextbookTabs",
"instructor = lms.djangoapps.instructor.views.instructor_dashboard:InstructorDashboardTab",
"notes = lms.djangoapps.notes.views:NotesTab",
"pdf_textbooks = lms.djangoapps.courseware.tabs:PDFTextbookTabs",
"progress = lms.djangoapps.courseware.tabs:ProgressTab",
"static_tab = xmodule.tabs:StaticTab",
"syllabus = lms.djangoapps.courseware.tabs:SyllabusTab",
"teams = lms.djangoapps.teams.plugins:TeamsTab",
"textbooks = lms.djangoapps.courseware.tabs:TextbookTabs",
"wiki = lms.djangoapps.course_wiki.tab:WikiTab",
# ORA 1 tabs (deprecated)
"peer_grading = lms.djangoapps.open_ended_grading.views:PeerGradingTab",
"staff_grading = lms.djangoapps.open_ended_grading.views:StaffGradingTab",
"open_ended = lms.djangoapps.open_ended_grading.views:OpenEndedGradingTab",
],
"openedx.user_partition_scheme": [
"random = openedx.core.djangoapps.user_api.partition_schemes:RandomUserPartitionScheme",
"cohort = openedx.core.djangoapps.course_groups.partition_scheme:CohortPartitionScheme",
"verification = openedx.core.djangoapps.credit.partition_schemes:VerificationPartitionScheme",
],
"openedx.block_structure_transformer": [
"library_content = lms.djangoapps.course_blocks.transformers.library_content:ContentLibraryTransformer",
"split_test = lms.djangoapps.course_blocks.transformers.split_test:SplitTestTransformer",
"start_date = lms.djangoapps.course_blocks.transformers.start_date:StartDateTransformer",
"user_partitions = lms.djangoapps.course_blocks.transformers.user_partitions:UserPartitionTransformer",
"visibility = lms.djangoapps.course_blocks.transformers.visibility:VisibilityTransformer",
"course_blocks_api = lms.djangoapps.course_api.blocks.transformers.blocks_api:BlocksAPITransformer",
"proctored_exam = lms.djangoapps.course_api.blocks.transformers.proctored_exam:ProctoredExamTransformer",
],
}
)
| agpl-3.0 |
yonglehou/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 250 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
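# A small hedged check, not part of the original example: PolynomialFeatures
# builds exactly the Vandermonde-style matrix described in the module
# docstring, one row [1, x, x**2, x**3] per input point.
vander_check = PolynomialFeatures(degree=3).fit_transform(
    np.array([[2.], [3.]]))
print(vander_check)  # [[ 1.  2.  4.  8.], [ 1.  3.  9. 27.]]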
| bsd-3-clause |
GoogleCloudPlatform/public-datasets-pipelines | datasets/scalable_open_source/pipelines/deps_dev/deps_dev_dag.py | 1 | 2740 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2022-05-01",
}
with DAG(
dag_id="scalable_open_source.deps_dev",
default_args=default_args,
max_active_runs=1,
schedule_interval="@weekly",
catchup=False,
default_view="graph",
) as dag:
# Copy deps.dev dataset
copy_bq_datasets = kubernetes_pod.KubernetesPodOperator(
task_id="copy_bq_datasets",
name="copy_bq_datasets",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.scalable_open_source.container_registry.copy_bq_datasets }}",
env_vars={
"SOURCE_PROJECT_ID": "{{ var.json.scalable_open_source.source_project_id }}",
"SOURCE_BQ_DATASET": "{{ var.json.scalable_open_source.source_bq_dataset }}",
"TARGET_PROJECT_ID": "{{ var.json.scalable_open_source.target_project_id }}",
"TARGET_BQ_DATASET": "deps_dev_v1",
"SERVICE_ACCOUNT": "{{ var.json.scalable_open_source.service_account }}",
},
resources={"request_memory": "128M", "request_cpu": "200m"},
)
# Generate BQ views
generate_bq_views = kubernetes_pod.KubernetesPodOperator(
task_id="generate_bq_views",
name="generate_bq_views",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.scalable_open_source.container_registry.generate_bq_views }}",
env_vars={
"SOURCE_PROJECT_ID": "{{ var.json.scalable_open_source.source_project_id }}",
"TARGET_PROJECT_ID": "{{ var.json.scalable_open_source.target_project_id }}",
"SOURCE_DATASET": "{{ var.json.scalable_open_source.source_bq_dataset }}",
"TARGET_DATASET": "deps_dev_v1",
"SERVICE_ACCOUNT": "{{ var.json.scalable_open_source.service_account }}",
},
resources={"request_memory": "128M", "request_cpu": "200m"},
)
copy_bq_datasets >> generate_bq_views
| apache-2.0 |
mxjl620/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 274 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 87 | 2828 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
datalogai/recurrentshop | examples/recurrent_highway_networks.py | 1 | 5919 | '''
Recurrent Highway Networks
-------------------------------------------------------------------------------
Julian Georg Zilly | Rupesh Kumar Srivastava | Jan Koutnik | Jurgen Schmidhuber
https://arxiv.org/abs/1607.03474
This is an implementation of language modeling experiments
on text8 dataset as specified in the paper
Visit https://github.com/julian121266/RecurrentHighwayNetworks for
implementations using Tensorflow, Torch7 and Brainstorm frameworks
and other datasets
'''
from recurrentshop import RecurrentModel
from recurrentshop.advanced_cells import RHNCell
from keras.models import Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.layers import add, multiply
from keras.layers import Activation, Embedding
from keras.constraints import max_norm
from keras.initializers import Constant, RandomUniform
from keras.regularizers import l2
from keras.preprocessing.text import Tokenizer
from keras.callbacks import Callback
from keras import backend as K
import numpy as np
import os
import urllib
import zipfile
#
# Hyperparameters
#
batch_size = 128
timesteps = 10
learning_rate = 0.2
hidden_dim = 10
recurrence_depth = 10
weight_decay = 1e-7
lr_decay = 1.04
gradient_clip = 10
embedding_drop = 0.05
output_drop = 0.3
input_drop = 0.3
hidden_drop = 0.05
transform_bias = -4.0
weight_init = RandomUniform(-0.04, 0.04)
def download_data(path):
print('Downloading data . . .')
url = "http://mattmahoney.net/dc/text8.zip"
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
urllib.urlretrieve(url, path)
with zipfile.ZipFile(path) as zf:
zf.extractall(path=path)
def load_text():
recurrentshop_directory = os.path.expanduser('~') + '/.recurrentshop'
datasets_directory = recurrentshop_directory + '/datasets'
FILE_PATH = os.path.join(recurrentshop_directory, datasets_directory, 'text8')
if not os.path.exists(FILE_PATH):
download_data(FILE_PATH)
raw_text = open(FILE_PATH, 'r').read(100000)
tokenizer = Tokenizer(filters='', char_level=True, lower=False)
tokenizer.fit_on_texts(raw_text)
tokenized_text = tokenizer.texts_to_sequences(raw_text)
return tokenized_text, len(tokenizer.word_index)
tokenized_text, vocab_size = load_text()
embedding_dim = vocab_size # Size of character set
def generate_batch(text, batch_size, num_steps):
raw_data = np.squeeze(np.array(text, dtype=np.int32))
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
i = 0
while i in range(epoch_size):
x = data[:, i*num_steps:(i+1)*num_steps]
y = data[:, (i+1)*num_steps]
if i + 1 >= epoch_size:
i = 0
else:
i += 1
yield (x, y)
def RHN(input_dim, hidden_dim, depth):
# Wrapped model
inp = Input(batch_shape=(batch_size, input_dim))
state = Input(batch_shape=(batch_size, hidden_dim))
drop_mask = Input(batch_shape=(batch_size, hidden_dim))
# To avoid all zero mask causing gradient to vanish
inverted_drop_mask = Lambda(lambda x: 1.0 - x, output_shape=lambda s: s)(drop_mask)
drop_mask_2 = Lambda(lambda x: x + 0., output_shape=lambda s: s)(inverted_drop_mask)
dropped_state = multiply([state, inverted_drop_mask])
y, new_state = RHNCell(units=hidden_dim, recurrence_depth=depth,
kernel_initializer=weight_init,
kernel_regularizer=l2(weight_decay),
kernel_constraint=max_norm(gradient_clip),
bias_initializer=Constant(transform_bias),
recurrent_initializer=weight_init,
recurrent_regularizer=l2(weight_decay),
recurrent_constraint=max_norm(gradient_clip))([inp, dropped_state])
return RecurrentModel(input=inp, output=y,
initial_states=[state, drop_mask],
final_states=[new_state, drop_mask_2])
# lr decay Scheduler
class lr_scheduler(Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch > 5:
            lr = K.get_value(self.model.optimizer.lr) / lr_decay
K.set_value(self.model.optimizer.lr, lr)
###########################################
# Build Model
###########################################
inp = Input(batch_shape=(batch_size, timesteps))
x = Dropout(embedding_drop)(inp)
x = Embedding(vocab_size+1, embedding_dim, input_length=timesteps)(inp)
x = Dropout(input_drop)(x)
# Create a dropout mask for variational dropout
drop_mask = Lambda(lambda x: x[:, 0, :1] * 0., output_shape=lambda s: (s[0], 1))(x)
drop_mask = Lambda(lambda x, dim: K.tile(x, (1, dim)),
arguments={'dim': hidden_dim},
output_shape=(hidden_dim,))(drop_mask)
drop_mask = Lambda(K.ones_like, output_shape=lambda s: s)(drop_mask)
drop_mask = Dropout(hidden_drop)(drop_mask)
zero_init = Lambda(K.zeros_like, output_shape=lambda s:s)(drop_mask)
x = RHN(embedding_dim, hidden_dim, recurrence_depth)(x, initial_state=[zero_init, drop_mask])
x = Dropout(output_drop)(x)
out = Dense(vocab_size+1, activation='softmax')(x)
model = Model(inputs=[inp], outputs=[out])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
data_gen = generate_batch(tokenized_text, batch_size, timesteps)
model.fit_generator(generator=data_gen,
steps_per_epoch=(len(tokenized_text)//batch_size),
epochs=5,
verbose=1,
callbacks=[lr_scheduler()])
| mit |
yonglehou/scikit-learn | examples/linear_model/plot_omp.py | 379 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |w|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
markslwong/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 22 | 57502 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
        # Setting a lr multiplier on a non-embedding column should raise.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertBinaryPredictions(3, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predicted_classes, [1, 0, 0])
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predicted_proba, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.2)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
markslwong/tensorflow | tensorflow/tools/docs/generate_1_0.py | 18 | 3157 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import generate_lib
if __name__ == '__main__':
doc_generator = generate_lib.DocGenerator()
doc_generator.add_output_dir_argument()
doc_generator.add_src_dir_argument()
# This doc generator works on the TensorFlow codebase. Since this script lives
# at tensorflow/tools/docs, and all code is defined somewhere inside
# tensorflow/, we can compute the base directory (two levels up), which is
# valid unless we're trying to apply this to a different code base, or are
# moving the script around.
script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
default_base_dir = os.path.join(script_dir, '..', '..')
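  # For example, with this script at <repo>/tensorflow/tools/docs/generate_1_0.py,
  # script_dir is <repo>/tensorflow/tools/docs and default_base_dir resolves to
  # <repo>/tensorflow, two levels up, as described above.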
doc_generator.add_base_dir_argument(default_base_dir)
flags = doc_generator.parse_known_args()
# tf_debug is not imported with tf, it's a separate module altogether
doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
doc_generator.set_do_not_descend_map({
'': ['cli', 'lib', 'wrappers'],
'contrib': [
'compiler',
'factorization',
'grid_rnn',
'labeled_tensor',
'ndlstm',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'training',
'tfprof',
],
'contrib.bayesflow': [
'entropy', 'monte_carlo', 'special_math',
'stochastic_gradient_estimators', 'stochastic_graph',
'stochastic_tensor', 'stochastic_variables', 'variational_inference'
],
'contrib.distributions': ['bijector'],
'contrib.ffmpeg': ['ffmpeg_ops'],
'contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'contrib.layers': ['feature_column', 'summaries'],
'contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'contrib.util': ['loader'],
})
sys.exit(doc_generator.build(flags))
| apache-2.0 |
xuezhisd/DeepLearningTutorials | code/SdA.py | 30 | 18975 | """
This tutorial introduces stacked denoising auto-encoders (SdA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denoising autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from dA import dA
# start-snippet-1
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
    Note that after pretraining, the SdA is dealt with as a normal MLP;
the dAs are only used to initialize the weights.
"""
def __init__(
self,
numpy_rng,
theano_rng=None,
n_ins=784,
hidden_layers_sizes=[500, 500],
n_outs=10,
corruption_levels=[0.1, 0.1]
):
""" This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the sdA
        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layer sizes, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type corruption_levels: list of float
:param corruption_levels: amount of corruption to use for each
layer
"""
self.sigmoid_layers = []
self.dA_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
# end-snippet-1
# The SdA is an MLP, for which all weights of intermediate layers
# are shared with a different denoising autoencoders
# We will first construct the SdA as a deep multilayer perceptron,
# and when constructing each sigmoidal layer we also construct a
# denoising autoencoder that shares weights with that layer
# During pretraining we will train these autoencoders (which will
        # lead to changing the weights of the MLP as well)
        # During finetuning we will finish training the SdA by doing
        # stochastic gradient descent on the MLP
# start-snippet-2
for i in xrange(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question...
# but we are going to only declare that the parameters of the
# sigmoid_layers are parameters of the StackedDAA
# the visible biases in the dA are parameters of those
# dA, but not the SdA
self.params.extend(sigmoid_layer.params)
# Construct a denoising autoencoder that shared weights with this
# layer
dA_layer = dA(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
bhid=sigmoid_layer.b)
self.dA_layers.append(dA_layer)
# end-snippet-2
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs
)
self.params.extend(self.logLayer.params)
        # construct a function that implements one step of finetuning
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, batch_size):
''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with same index.
The function will require as input the minibatch index, and to train
a dA you just need to iterate, calling the corresponding function on
all minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared variable that contains all datapoints used
for training the dA
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: float
:param learning_rate: learning rate used during training for any of
the dA layers
'''
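        # Hedged usage illustration (mirrors test_SdA below); `pretrain_lr`
        # and `corruption_levels` stand for whatever the caller chooses:
        #
        #   fns = sda.pretraining_functions(train_set_x, batch_size)
        #   for i, fn in enumerate(fns):
        #       for batch_index in xrange(n_train_batches):
        #           fn(index=batch_index,
        #              corruption=corruption_levels[i],
        #              lr=pretrain_lr)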
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
corruption_level = T.scalar('corruption') # % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
        # beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dA in self.dA_layers:
# get the cost and the updates list
cost, updates = dA.get_cost_updates(corruption_level,
learning_rate)
# compile the theano function
fn = theano.function(
inputs=[
index,
theano.Param(corruption_level, default=0.2),
theano.Param(learning_rate, default=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin: batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on
a batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contains all the datasets;
                         it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
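        # Hedged usage illustration (mirrors test_SdA below):
        #
        #   train_fn, valid_score, test_score = sda.build_finetune_functions(
        #       datasets=datasets, batch_size=batch_size,
        #       learning_rate=finetune_lr)
        #   cost = train_fn(minibatch_index)      # one SGD step
        #   validation_losses = valid_score()     # list, one loss per batch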
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches /= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = [
(param, param - gparam * learning_rate)
for param, gparam in zip(self.params, gparams)
]
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='train'
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='test'
)
valid_score_i = theano.function(
[index],
self.errors,
givens={
self.x: valid_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: valid_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='valid'
)
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in xrange(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in xrange(n_test_batches)]
return train_fn, valid_score, test_score
def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
pretrain_lr=0.001, training_epochs=1000,
dataset='mnist.pkl.gz', batch_size=1):
"""
    Demonstrates how to train and test a stacked denoising autoencoder.
This is demonstrated on MNIST.
    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage
(factor for the stochastic gradient)
:type pretraining_epochs: int
:param pretraining_epochs: number of epoch to do pretraining
:type pretrain_lr: float
:param pretrain_lr: learning rate to be used during pre-training
    :type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer
:type dataset: string
    :param dataset: path to the pickled dataset
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
# numpy random generator
# start-snippet-3
numpy_rng = numpy.random.RandomState(89677)
print '... building the model'
# construct the stacked denoising autoencoder class
sda = SdA(
numpy_rng=numpy_rng,
n_ins=28 * 28,
hidden_layers_sizes=[1000, 1000, 1000],
n_outs=10
)
# end-snippet-3 start-snippet-4
#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size)
print '... pre-training the model'
start_time = timeit.default_timer()
## Pre-train layer-wise
corruption_levels = [.1, .2, .3]
for i in xrange(sda.n_layers):
# go through pretraining epochs
for epoch in xrange(pretraining_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,
corruption=corruption_levels[i],
lr=pretrain_lr))
print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
print numpy.mean(c)
end_time = timeit.default_timer()
print >> sys.stderr, ('The pretraining code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
# end-snippet-4
########################
# FINETUNING THE MODEL #
########################
# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = sda.build_finetune_functions(
datasets=datasets,
batch_size=batch_size,
learning_rate=finetune_lr
)
    print '... finetuning the model'
# early-stopping parameters
patience = 10 * n_train_batches # look as this many examples regardless
patience_increase = 2. # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
                                  # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_fn(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = numpy.mean(validation_losses)
print('epoch %i, minibatch %i/%i, validation error %f %%' %
(epoch, minibatch_index + 1, n_train_batches,
this_validation_loss * 100.))
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
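                        # e.g. a large enough improvement at iteration t keeps
                        # training alive for at least another t iterations,
                        # since patience becomes 2 * t and the loop only stops
                        # once patience <= iter.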
# save best validation score and iteration number
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = test_model()
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(
(
'Optimization complete with best validation score of %f %%, '
'on iteration %i, '
'with test performance %f %%'
)
% (best_validation_loss * 100., best_iter + 1, test_score * 100.)
)
print >> sys.stderr, ('The training code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
test_SdA()
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/factorization/python/ops/gmm_test.py | 39 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/datasets/base_test.py | 132 | 3072 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import test
mock = test.mock
_TIMEOUT = IOError(110, "timeout")
class BaseTest(test.TestCase):
"""Test load csv functions."""
def testUrlretrieveRetriesOnIOError(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, None
]
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
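      # Exponential backoff: the expected delays double from 1s up to 16s
      # across the five retries; the 0.25 * expected tolerance below leaves
      # room for any jitter added to the sleep times.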
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesAfterRetriesAreExhausted(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesOnNonRetriableErrorWithoutRetry(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
IOError(2, "No such file or directory"),
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert no retries
self.assertFalse(mock_time.called)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mxjl620/scikit-learn | sklearn/neighbors/regression.py | 100 | 11017 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
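        # Uniform weights: take the plain mean of the k neighbors' targets.
        # Distance weights: compute a weighted average per output column,
        # normalized by each query point's total neighbor weight.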
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
        y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
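        # Each query point may have a different number of neighbors inside the
        # radius, so the (weighted) average is computed per query point.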
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
GoogleCloudPlatform/public-datasets-pipelines | datasets/cdc_places/pipelines/local_data_for_better_health_county_data/local_data_for_better_health_county_data_dag.py | 2 | 5538 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="cdc_places.local_data_for_better_health_county_data",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
local_data_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="local_data_transform_csv",
startup_timeout_seconds=600,
name="cdc_places_local_data_for_better_health_county_data",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.cdc_places.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://chronicdata.cdc.gov/resource/swc5-untb.csv",
"SOURCE_FILE": "files/data.csv",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/cdc_places/local_data_for_better_health_county_data/data_output.csv",
"CSV_HEADERS": '["year","stateabbr","statedesc","locationname","datasource","category","measure","data_value_unit","data_value_type","data_value","data_value_footnote_symbol","data_value_footnote","low_confidence_limit","high_confidence_limit","totalpopulation","locationid","categoryid","measureid","datavaluetypeid","short_question_text","geolocation"]',
"RENAME_MAPPINGS": '{"year": "year","stateabbr": "stateabbr","statedesc": "statedesc","locationname": "locationname","datasource": "datasource","category": "category","measure": "measure","data_value_unit": "data_value_unit","data_value_type": "data_value_type","data_value": "data_value","data_value_footnote_symbol": "data_value_footnote_symbol","data_value_footnote": "data_value_footnote","low_confidence_limit": "low_confidence_limit","high_confidence_limit": "high_confidence_limit","totalpopulation": "totalpopulation","locationid": "locationid","categoryid": "categoryid","measureid": "measureid","datavaluetypeid": "datavaluetypeid","short_question_text": "short_question_text","geolocation": "geolocation"}',
"PIPELINE_NAME": "local_data_for_better_health_county_data",
},
resources={
"request_memory": "4G",
"request_cpu": "2",
"request_ephemeral_storage": "10G",
},
)
# Task to load CSV data to a BigQuery table
load_local_data_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_local_data_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=[
"data/cdc_places/local_data_for_better_health_county_data/data_output.csv"
],
source_format="CSV",
destination_project_dataset_table="cdc_places.local_data_for_better_health_county_data",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{"name": "year", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "stateabbr", "type": "STRING", "mode": "NULLABLE"},
{"name": "statedesc", "type": "STRING", "mode": "NULLABLE"},
{"name": "locationname", "type": "STRING", "mode": "NULLABLE"},
{"name": "datasource", "type": "STRING", "mode": "NULLABLE"},
{"name": "category", "type": "STRING", "mode": "NULLABLE"},
{"name": "measure", "type": "STRING", "mode": "NULLABLE"},
{"name": "data_value_unit", "type": "STRING", "mode": "NULLABLE"},
{"name": "data_value_type", "type": "STRING", "mode": "NULLABLE"},
{"name": "data_value", "type": "FLOAT", "mode": "NULLABLE"},
{
"name": "data_value_footnote_symbol",
"type": "STRING",
"mode": "NULLABLE",
},
{"name": "data_value_footnote", "type": "STRING", "mode": "NULLABLE"},
{"name": "low_confidence_limit", "type": "FLOAT", "mode": "NULLABLE"},
{"name": "high_confidence_limit", "type": "FLOAT", "mode": "NULLABLE"},
{"name": "totalpopulation", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "locationid", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "categoryid", "type": "STRING", "mode": "NULLABLE"},
{"name": "measureid", "type": "STRING", "mode": "NULLABLE"},
{"name": "datavaluetypeid", "type": "STRING", "mode": "NULLABLE"},
{"name": "short_question_text", "type": "STRING", "mode": "NULLABLE"},
{"name": "geolocation", "type": "GEOGRAPHY", "mode": "NULLABLE"},
],
)
local_data_transform_csv >> load_local_data_to_bq
| apache-2.0 |
IONISx/edx-platform | openedx/core/lib/block_cache/tests/test_block_structure_factory.py | 32 | 4070 | """
Tests for block_structure_factory.py
"""
# pylint: disable=protected-access
from mock import patch
from unittest import TestCase
from ..block_structure_factory import BlockStructureFactory
from .test_utils import (
MockCache, MockModulestoreFactory, MockTransformer, ChildrenMapTestMixin
)
class TestBlockStructureFactory(TestCase, ChildrenMapTestMixin):
"""
Tests for BlockStructureFactory
"""
def setUp(self):
super(TestBlockStructureFactory, self).setUp()
self.children_map = self.SIMPLE_CHILDREN_MAP
self.modulestore = MockModulestoreFactory.create(self.children_map)
self.block_structure = BlockStructureFactory.create_from_modulestore(
root_block_usage_key=0, modulestore=self.modulestore
)
self.transformers = [MockTransformer]
mock_registry = patch(
'openedx.core.lib.block_cache.transformer_registry.TransformerRegistry.get_available_plugins'
)
mock_registry.return_value = {transformer.name(): transformer for transformer in self.transformers}
self.addCleanup(mock_registry.stop)
mock_registry.start()
def add_transformers(self):
"""
Add each registered transformer to the block structure.
Mimic collection by setting test transformer block data.
"""
for transformer in self.transformers:
self.block_structure._add_transformer(transformer)
self.block_structure.set_transformer_block_field(
usage_key=0, transformer=transformer, key='test', value='{} val'.format(transformer.name())
)
def test_create_from_modulestore(self):
self.assert_block_structure(self.block_structure, self.children_map)
def test_not_in_cache(self):
cache = MockCache()
self.assertIsNone(
BlockStructureFactory.create_from_cache(
root_block_usage_key=0,
cache=cache,
transformers=self.transformers,
)
)
def test_uncollected_transformers(self):
cache = MockCache()
# serialize the structure to cache, but without collecting any transformer data
BlockStructureFactory.serialize_to_cache(self.block_structure, cache)
with patch('openedx.core.lib.block_cache.block_structure_factory.logger.info') as mock_logger:
# cached data does not have collected information for all registered transformers
self.assertIsNone(
BlockStructureFactory.create_from_cache(
root_block_usage_key=0,
cache=cache,
transformers=self.transformers,
)
)
self.assertTrue(mock_logger.called)
def test_cache(self):
cache = MockCache()
# collect transformer data
self.add_transformers()
# serialize to cache
BlockStructureFactory.serialize_to_cache(self.block_structure, cache)
# test re-create from cache
self.modulestore.get_items_call_count = 0
from_cache_block_structure = BlockStructureFactory.create_from_cache(
root_block_usage_key=0,
cache=cache,
transformers=self.transformers,
)
self.assertIsNotNone(from_cache_block_structure)
self.assert_block_structure(from_cache_block_structure, self.children_map)
self.assertEquals(self.modulestore.get_items_call_count, 0)
def test_remove_from_cache(self):
cache = MockCache()
# collect transformer data
self.add_transformers()
# serialize to cache
BlockStructureFactory.serialize_to_cache(self.block_structure, cache)
# remove from cache
BlockStructureFactory.remove_from_cache(root_block_usage_key=0, cache=cache)
self.assertIsNone(
BlockStructureFactory.create_from_cache(
root_block_usage_key=0,
cache=cache,
transformers=self.transformers
)
)
| agpl-3.0 |
IONISx/edx-platform | lms/djangoapps/course_blocks/usage_info.py | 89 | 1246 | """
Declares CourseUsageInfo class to be used by the transform method in
Transformers.
"""
from lms.djangoapps.courseware.access import _has_access_to_course
class CourseUsageInfo(object):
'''
A class object that encapsulates the course and user context to be
used as currency across block structure transformers, by passing
an instance of it in calls to BlockStructureTransformer.transform
methods.
'''
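    # Illustrative sketch (transformer name and variables are hypothetical):
    #
    #   usage_info = CourseUsageInfo(course_key, user)
    #   MyTransformer().transform(usage_info, block_structure)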
def __init__(self, course_key, user):
# Course identifier (opaque_keys.edx.keys.CourseKey)
self.course_key = course_key
# User object (django.contrib.auth.models.User)
self.user = user
# Cached value of whether the user has staff access (bool/None)
self._has_staff_access = None
@property
def has_staff_access(self):
'''
Returns whether the user has staff access to the course
associated with this CourseUsageInfo instance.
For performance reasons (minimizing multiple SQL calls), the
value is cached within this instance.
'''
if self._has_staff_access is None:
self._has_staff_access = _has_access_to_course(self.user, 'staff', self.course_key)
return self._has_staff_access
| agpl-3.0 |
mwalton/artificial-olfaction | python/deepRegressor.py | 1 | 7005 | import climate
import theanets
import numpy as np
from sklearn.cross_validation import train_test_split
from os import path
from math import sqrt
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import platform
def loadData(XPath, yPath):
X = np.genfromtxt(XPath, delimiter=",", dtype="float32")
y = np.genfromtxt(yPath, delimiter=",", dtype="float32")
return (X, y)
def scale(label):
#label[label<1e-5]=1e-5
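    # Compress the dynamic range of the concentration labels with a fourth root.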
return np.power(label, 0.25)
#return np.log10(label)
def standardize(featureVector):
scaler = StandardScaler()
return scaler.fit_transform(featureVector)
# model prediction assessment functions
def unit_vector(vector):
""" Returns the unit vector of the input. """
return vector / np.linalg.norm(vector)
"""computes the angle between two vectors in radians"""
def angle_between(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle
"""computes the accuracy given a target and a prediciton vector"""
def vector_accuracy(v1, v2):
#radAngle = angle_between(v1, v2)
#return 1 - (np.degrees(radAngle) / 90)
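    # Cosine similarity between the normalized prediction and target vectors.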
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.dot(v1_u, v2_u)
def avg_va(yPred, yTrue, minC):
# declare a container to hold the vector accuracy timeseries
vecA = []
    # for each timestep, if the largest true concentration exceeds minC, compute vector_accuracy
    for i in range(np.shape(yTrue)[0]):
        if (np.max(yTrue[i,:]) > minC):
vecA.append(vector_accuracy(yPred[i,:], yTrue[i,:]))
return np.mean(vecA, dtype=float)
if(platform.system() == 'Darwin'):
basePath="/Users/michaelwalton/Dropbox/Evolved Machines 2014/Machine Learning/datasets/kaggle"
else:
basePath="/home/myke/Dropbox/Evolved Machines 2014/Machine Learning/datasets/kaggle"
xtrainpath=path.join(basePath, "paul_medC_BG2/train/sensorActivation.csv")
ytrainpath=path.join(basePath, "paul_medC_BG2/train/concentration.csv")
xtestpath=path.join(basePath, "paul_highC_BG1/test/sensorActivation.csv")
ytestpath=path.join(basePath, "paul_highC_BG1/test/concentration.csv")
(Xtrain, ytrain) = loadData(xtrainpath, ytrainpath)
(Xtest, ytest) = loadData(xtestpath, ytestpath)
ytest=ytest[:,1:]
ytrain=ytrain[:,1:]
#Xtrain=standardize(Xtrain)
#Xtest=standardize(Xtest)
ytrain=scale(ytrain)
ytest=scale(ytest)
# split up the training data into train and validation
Xtrain, Xvalidate, ytrain, yvalidate = train_test_split(
Xtrain, ytrain, test_size=0.10, random_state=0)
training_data = [Xtrain, ytrain]
validation_data = [Xvalidate, yvalidate]
test_data = [Xtest, ytest]
climate.enable_default_logging()
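# Feed-forward regressor: 100 inputs (sensor activations) -> 50 hidden units
# -> 4 outputs (one concentration per odorant channel).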
exp = theanets.Experiment(
theanets.Regressor,
layers=(100, 50, 4),
#hidden_l1=0.1,
)
if (path.isfile("mdl.pkl")):
print "loading model from file"
exp.load("mdl.pkl")
else:
print "training network"
t_loss=[]
v_loss=[]
"""
for t in trainer:
(train,valid) = t
t_loss.append(train['loss'])
v_loss.append(valid['loss'])
fig = plt.figure(figsize=(10,10))
ax1 = fig.add_subplot(111)
ax1.plot(t_loss, c='r', label='Training')
ax1.plot(v_loss, c='b', label='Validation')
ax1.set_xlabel('batch')
ax1.set_ylabel('log(loss)')
ax1.set_yscale('log')
ax1.set_ylim(1e-2,1e-1)
ax1.legend()
plt.show()
"""
"""
exp.train(
training_data,
validation_data,
optimize='sgd',
#learning_rate=0.01,
#momentum=0.5,
)
"""
exp.network.save("mdl.pkl")
print exp.network.params[0]
#print next(m)
y_pls=exp.network.predict(Xtest)
print("Normalized VA: %s\n" % avg_va(y_pls, ytest, 0.001))
pls_rmse=[]
pls_rmse.append(sqrt(mean_squared_error(ytest[:,0], y_pls[:,0])))
pls_rmse.append(sqrt(mean_squared_error(ytest[:,1], y_pls[:,1])))
pls_rmse.append(sqrt(mean_squared_error(ytest[:,2], y_pls[:,2])))
pls_rmse.append(sqrt(mean_squared_error(ytest[:,3], y_pls[:,3])))
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(241)
ax1.plot(y_pls[:,0], c='r', label='NN Fit')
ax1.plot(ytest[:,0], c='grey', label='Target')
ax1.set_xlabel('Time')
ax1.set_ylabel('[c]')
#ax1.set_yscale('log')
ax1.set_title('RED')
ax1.legend()
ax2 = fig.add_subplot(242)
ax2.plot(y_pls[:,1], c='g', label='NN Fit')
ax2.plot(ytest[:,1], c='grey', label='Target')
ax2.set_xlabel('Time')
ax2.set_title('GREEN')
ax2.legend()
ax3 = fig.add_subplot(243)
ax3.plot(y_pls[:,2], c='b', label='NN Fit')
#ax3.plot(y_lin[2], c='r', label='Linear Fit')
#ax3.plot(y_poly[2], c='b', label='Poly Fit')
ax3.plot(ytest[:,2], c='grey', label='Target')
ax3.set_xlabel('Time')
#ax3.set_ylabel('log[c]')
ax3.set_title('BLUE')
ax3.legend()
ax4 = fig.add_subplot(244)
ax4.plot(y_pls[:,3], c='y', label='NN Fit')
#ax4.plot(y_lin[3], c='r', label='Linear Fit')
#ax4.plot(y_poly[3], c='b', label='Poly Fit')
ax4.plot(ytest[:,3], c='grey', label='Target')
ax4.set_xlabel('Time')
#ax4.set_ylabel('log[c]')
ax4.set_title('YELLOW')
ax4.legend()
ax5 = fig.add_subplot(245)
ax5.scatter(ytest[:,0], y_pls[:,0], c='r', label=('NN nRMSE=%0.2f' % pls_rmse[0]))
#ax5.scatter(y[:,0], y_lin[0], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[0]))
#ax5.scatter(y[:,0], y_poly[0], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[0]))
ax5.plot(ytest[:,0],ytest[:,0],c='grey')
ax5.set_xlim(np.min(ytest[:,0]), np.max(ytest[:,0]))
ax5.set_xlabel('Prediction')
ax5.set_ylabel('Actual')
ax5.legend()
ax6 = fig.add_subplot(246)
ax6.scatter(ytest[:,1], y_pls[:,1], c='g', label=('NN nRMSE=%0.2f' % pls_rmse[1]))
#ax6.scatter(y[:,1], y_lin[1], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[1]))
#ax6.scatter(y[:,1], y_poly[1], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[1]))
ax6.plot(ytest[:,1],ytest[:,1],c='grey')
ax6.set_xlim(np.min(ytest[:,1]), np.max(ytest[:,1]))
ax6.set_xlabel('Prediction')
#ax6.set_ylabel('Actual')
ax6.legend()
ax7 = fig.add_subplot(247)
ax7.scatter(ytest[:,2], y_pls[:,2], c='b', label=('NN nRMSE=%0.2f' % pls_rmse[2]))
#ax7.scatter(y[:,2], y_lin[2], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[2]))
#ax7.scatter(y[:,2], y_poly[2], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[2]))
ax7.plot(ytest[:,2],ytest[:,2],c='grey')
ax7.set_xlim(np.min(ytest[:,2]), np.max(ytest[:,2]))
ax7.set_xlabel('Prediction')
#ax7.set_ylabel('Actual')
ax7.legend()
ax8 = fig.add_subplot(248)
ax8.scatter(ytest[:,3], y_pls[:,3], c='y', label=('NN nRMSE=%0.2f' % pls_rmse[3]))
#ax8.scatter(y[:,3], y_lin[3], c='r', label=('Linear RMSE=%0.2f' % lin_rmse[3]))
#ax8.scatter(y[:,3], y_poly[3], c='b', label=('Polynomial RMSE=%0.2f' % poly_rmse[3]))
ax8.plot(ytest[:,3],ytest[:,3],c='grey')
ax8.set_xlim(np.min(ytest[:,3]), np.max(ytest[:,3]))
ax8.set_xlabel('Prediction')
#ax8.set_ylabel('Actual')
ax8.legend()
plt.show()
| mit |
markslwong/tensorflow | tensorflow/contrib/learn/python/learn/estimators/stability_test.py | 110 | 6455 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimator regression tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import optimizer as optimizer_lib
def _get_input_fn(x, y, batch_size=None):
df = data_feeder.setup_train_data_feeder(
x, y, n_classes=None, batch_size=batch_size)
return df.input_builder, df.get_feed_dict_fn()
# We use a null optimizer since we can't get deterministic results out of
# supervisor's multiple threads.
class _NullOptimizer(optimizer_lib.Optimizer):
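  """Optimizer whose update ops are no-ops, leaving variables unchanged."""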
def __init__(self):
super(_NullOptimizer, self).__init__(use_locking=False, name='Null')
def _apply_dense(self, grad, var):
return control_flow_ops.no_op()
def _apply_sparse(self, grad, var):
return control_flow_ops.no_op()
def _prepare(self):
pass
_NULL_OPTIMIZER = _NullOptimizer()
class StabilityTest(test.TestCase):
"""Tests that estiamtors are reproducible."""
def testRandomStability(self):
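    # The same graph-level seed must produce identical random tensors across
    # separate graphs and sessions.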
my_seed = 42
minval = -0.3333
maxval = 0.3333
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
g.seed = my_seed
x = random_ops.random_uniform([10, 10], minval=minval, maxval=maxval)
val1 = session.run(x)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as session:
g.seed = my_seed
x = random_ops.random_uniform([10, 10], minval=minval, maxval=maxval)
val2 = session.run(x)
self.assertAllClose(val1, val2)
def testLinearRegression(self):
my_seed = 42
config = run_config.RunConfig(tf_random_seed=my_seed)
boston = base.load_boston()
columns = [feature_column.real_valued_column('', dimension=13)]
    # Train two identically-seeded regressors and check they learn the same weights.
with ops.Graph().as_default() as g1:
random.seed(my_seed)
g1.seed = my_seed
variables.create_global_step()
regressor1 = linear.LinearRegressor(
optimizer=_NULL_OPTIMIZER, feature_columns=columns, config=config)
regressor1.fit(x=boston.data, y=boston.target, steps=1)
with ops.Graph().as_default() as g2:
random.seed(my_seed)
g2.seed = my_seed
variables.create_global_step()
regressor2 = linear.LinearRegressor(
optimizer=_NULL_OPTIMIZER, feature_columns=columns, config=config)
regressor2.fit(x=boston.data, y=boston.target, steps=1)
variable_names = regressor1.get_variable_names()
self.assertIn('linear//weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
regressor1_weights = regressor1.get_variable_value('linear//weight')
regressor2_weights = regressor2.get_variable_value('linear//weight')
regressor1_bias = regressor1.get_variable_value('linear/bias_weight')
regressor2_bias = regressor2.get_variable_value('linear/bias_weight')
self.assertAllClose(regressor1_weights, regressor2_weights)
self.assertAllClose(regressor1_bias, regressor2_bias)
self.assertAllClose(
list(regressor1.predict_scores(
boston.data, as_iterable=True)),
list(regressor2.predict_scores(
boston.data, as_iterable=True)),
atol=1e-05)
def testDNNRegression(self):
my_seed = 42
config = run_config.RunConfig(tf_random_seed=my_seed)
boston = base.load_boston()
columns = [feature_column.real_valued_column('', dimension=13)]
with ops.Graph().as_default() as g1:
random.seed(my_seed)
g1.seed = my_seed
variables.create_global_step()
regressor1 = dnn.DNNRegressor(
hidden_units=[10],
feature_columns=columns,
optimizer=_NULL_OPTIMIZER,
config=config)
regressor1.fit(x=boston.data, y=boston.target, steps=1)
with ops.Graph().as_default() as g2:
random.seed(my_seed)
g2.seed = my_seed
variables.create_global_step()
regressor2 = dnn.DNNRegressor(
hidden_units=[10],
feature_columns=columns,
optimizer=_NULL_OPTIMIZER,
config=config)
regressor2.fit(x=boston.data, y=boston.target, steps=1)
weights1 = ([regressor1.get_variable_value('dnn/hiddenlayer_0/weights')] +
[regressor1.get_variable_value('dnn/logits/weights')])
weights2 = ([regressor2.get_variable_value('dnn/hiddenlayer_0/weights')] +
[regressor2.get_variable_value('dnn/logits/weights')])
for w1, w2 in zip(weights1, weights2):
self.assertAllClose(w1, w2)
biases1 = ([regressor1.get_variable_value('dnn/hiddenlayer_0/biases')] +
[regressor1.get_variable_value('dnn/logits/biases')])
biases2 = ([regressor2.get_variable_value('dnn/hiddenlayer_0/biases')] +
[regressor2.get_variable_value('dnn/logits/biases')])
for b1, b2 in zip(biases1, biases2):
self.assertAllClose(b1, b2)
self.assertAllClose(
list(regressor1.predict_scores(
boston.data, as_iterable=True)),
list(regressor2.predict_scores(
boston.data, as_iterable=True)),
atol=1e-05)
if __name__ == '__main__':
test.main()
| apache-2.0 |
yonglehou/scikit-learn | sklearn/tests/test_learning_curve.py | 224 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score becomes better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
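        # Training score equals `param`; validation score is its complement.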
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 142 | 22295 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
    # with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
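    # The selected (support) features must correspond exactly to the highest scores.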
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
kubernetes/test-infra | kettle/monitor.py | 6 | 2625 | #!/usr/bin/env python3
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A dead-simple Influxdb data pusher to report BigQuery database statistics.
"""
import argparse
import json
import sys
import time
try:
from google.cloud import bigquery
import google.cloud.exceptions
except ImportError:
print('WARNING: unable to load google cloud (test environment?)')
import traceback
traceback.print_exc()
def collect(tables, stale_hours):
stale = False
for table_spec in tables:
print(f'Checking {table_spec}...')
project, dataset_name = table_spec.split(':')
dataset, name = dataset_name.split('.')
table = bigquery.Client(project).dataset(dataset).table(name)
try:
table.reload()
except google.cloud.exceptions.NotFound: # pylint: disable=no-member
continue
# converting datetimes back into epoch-milliseconds is tiresome
# pylint: disable=protected-access
fields = {
'table_spec': table_spec,
'size_bytes': table.num_bytes,
'modified_time': int(table._properties.get('lastModifiedTime')),
'row_count': table.num_rows
}
hours_old = (time.time() - fields['modified_time'] / 1000) / (3600.0)
fields['hours_old'] = hours_old
if stale_hours and hours_old > stale_hours:
print('ERROR: table %s is %.1f hours old. Max allowed: %s hours.' % (
table.table_id, hours_old, stale_hours))
stale = True
print(json.dumps(fields))
    print('Finished checking tables')
return int(stale)
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--table', nargs='+', required=True,
help='List of datasets to return information about.')
parser.add_argument('--stale', type=int,
help='Number of hours to consider stale.')
opts = parser.parse_args(args)
return collect(opts.table, opts.stale)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
davidam/python-examples | scikit/plot_color_quantization.py | 31 | 3356 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
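# Back-of-the-envelope compression estimate for the palette idea described in
# the docstring (a sketch, assuming the 427x640 summer-palace sample image):
# raw RGB storage needs 427 * 640 * 3 = 819,840 bytes, whereas an indexed image
# needs one byte per pixel plus the 64-entry RGB palette, i.e.
# 427 * 640 + 64 * 3 = 273,472 bytes -- roughly a 3x reduction.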
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| gpl-3.0 |
yonglehou/scikit-learn | examples/neighbors/plot_classification.py | 285 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
markslwong/tensorflow | tensorflow/contrib/learn/python/learn/datasets/base.py | 123 | 6584 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from os import path
import random
import tempfile
import time
import numpy as np
from six.moves import urllib
from tensorflow.contrib.framework import deprecated
from tensorflow.python.platform import gfile
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def load_csv_with_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file with a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
header = next(data_file)
n_samples = int(header[0])
n_features = int(header[1])
data = np.zeros((n_samples, n_features), dtype=features_dtype)
target = np.zeros((n_samples,), dtype=target_dtype)
for i, row in enumerate(data_file):
target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
data[i] = np.asarray(row, dtype=features_dtype)
return Dataset(data=data, target=target)
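# Illustrative sketch of the CSV layout expected by load_csv_with_header above
# (values are made up; any header fields after n_samples and n_features, such
# as class names, are ignored by the loader):
#
#   3,2,setosa,versicolor      <- header: n_samples, n_features, ...
#   5.1,3.5,0                  <- feature columns, target in the last column
#   4.9,3.0,1
#   4.7,3.2,0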
def load_csv_without_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file without a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
data, target = [], []
for row in data_file:
target.append(row.pop(target_column))
data.append(np.asarray(row, dtype=features_dtype))
target = np.array(target, dtype=target_dtype)
data = np.array(data)
return Dataset(data=data, target=target)
def shrink_csv(filename, ratio):
"""Create a smaller dataset of only 1/ratio of original data."""
filename_small = filename.replace('.', '_small.')
with gfile.Open(filename_small, 'w') as csv_file_small:
writer = csv.writer(csv_file_small)
with gfile.Open(filename) as csv_file:
reader = csv.reader(csv_file)
i = 0
for row in reader:
if i % ratio == 0:
writer.writerow(row)
i += 1
def load_iris(data_path=None):
"""Load Iris dataset.
Args:
data_path: string, path to iris dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'iris.csv')
return load_csv_with_header(
data_path,
target_dtype=np.int,
features_dtype=np.float)
def load_boston(data_path=None):
"""Load Boston housing dataset.
Args:
data_path: string, path to boston dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'boston_house_prices.csv')
return load_csv_with_header(
data_path,
target_dtype=np.float,
features_dtype=np.float)
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
    factor: on each subsequent retry, the delay is multiplied by this value
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
max_delay: the maximum delay allowed (actual max is
      max_delay * (1 + jitter)).
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
        except Exception as e:  # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
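# Rough retry schedule implied by the code above (a sketch, using the defaults
# factor=2.0 and jitter=0.25 together with the initial_delay=1.0 and
# max_delay=16.0 applied below): successive sleeps of about 1, 2, 4, 8 and 16
# seconds, each scaled by a random factor in [0.75, 1.25]; once the delays are
# exhausted, the wrapped function is called one final time without catching
# exceptions.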
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
| apache-2.0 |
blackye/luscan-devel | thirdparty_libs/nltk/corpus/reader/rte.py | 17 | 4586 | # Natural Language Toolkit: RTE Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the Recognizing Textual Entailment (RTE) Challenge Corpora.
The files were taken from the RTE1, RTE2 and RTE3 datasets and the files
were regularized.
Filenames are of the form rte*_dev.xml and rte*_test.xml. The latter are the
gold standard annotated files.
Each entailment corpus is a list of 'text'/'hypothesis' pairs. The following
example is taken from RTE3::
<pair id="1" entailment="YES" task="IE" length="short" >
<t>The sale was made to pay Yukos' US$ 27.5 billion tax bill,
Yuganskneftegaz was originally sold for US$ 9.4 billion to a little known
company Baikalfinansgroup which was later bought by the Russian
state-owned oil company Rosneft .</t>
<h>Baikalfinansgroup was sold to Rosneft.</h>
</pair>
In order to provide globally unique IDs for each pair, a new attribute
``challenge`` has been added to the root element ``entailment-corpus`` of each
file, taking values 1, 2 or 3. The GID is formatted 'm-n', where 'm' is the
challenge number and 'n' is the pair ID.
"""
from util import *
from api import *
from xmldocs import *
def norm(value_string):
"""
Normalize the string value in an RTE pair's ``value`` or ``entailment``
attribute as an integer (1, 0).
:param value_string: the label used to classify a text/hypothesis pair
:type value_string: str
:rtype: int
"""
valdict = {"TRUE": 1,
"FALSE": 0,
"YES": 1,
"NO": 0}
return valdict[value_string.upper()]
class RTEPair:
"""
Container for RTE text-hypothesis pairs.
The entailment relation is signalled by the ``value`` attribute in RTE1, and by
``entailment`` in RTE2 and RTE3. These both get mapped on to the ``entailment``
attribute of this class.
"""
def __init__(self, pair, challenge=None, id=None, text=None, hyp=None,
value=None, task=None, length=None):
"""
:param challenge: version of the RTE challenge (i.e., RTE1, RTE2 or RTE3)
:param id: identifier for the pair
:param text: the text component of the pair
:param hyp: the hypothesis component of the pair
:param value: classification label for the pair
:param task: attribute for the particular NLP task that the data was drawn from
:param length: attribute for the length of the text of the pair
"""
self.challenge = challenge
self.id = pair.attrib["id"]
self.gid = "%s-%s" % (self.challenge, self.id)
self.text = pair[0].text
self.hyp = pair[1].text
if "value" in pair.attrib:
self.value = norm(pair.attrib["value"])
elif "entailment" in pair.attrib:
self.value = norm(pair.attrib["entailment"])
else:
self.value = value
if "task" in pair.attrib:
self.task = pair.attrib["task"]
else:
self.task = task
if "length" in pair.attrib:
self.length = pair.attrib["length"]
else:
self.length = length
def __repr__(self):
if self.challenge:
return '<RTEPair: gid=%s-%s>' % (self.challenge, self.id)
else:
return '<RTEPair: id=%s>' % self.id
class RTECorpusReader(XMLCorpusReader):
"""
Corpus reader for corpora in RTE challenges.
This is just a wrapper around the XMLCorpusReader. See module docstring above for the expected
structure of input documents.
"""
def _read_etree(self, doc):
"""
Map the XML input into an RTEPair.
This uses the ``getiterator()`` method from the ElementTree package to
find all the ``<pair>`` elements.
:param doc: a parsed XML document
:rtype: list(RTEPair)
"""
try:
challenge = doc.attrib['challenge']
except KeyError:
challenge = None
return [RTEPair(pair, challenge=challenge)
for pair in doc.getiterator("pair")]
def pairs(self, fileids):
"""
Build a list of RTEPairs from a RTE corpus.
:param fileids: a list of RTE corpus fileids
:type: list
:rtype: list(RTEPair)
"""
if isinstance(fileids, basestring): fileids = [fileids]
return concat([self._read_etree(self.xml(fileid)) for fileid in fileids])
| gpl-2.0 |
dimkal/mne-python | examples/forward/plot_make_forward.py | 20 | 2669 | """
======================================================
Create a forward operator and display sensitivity maps
======================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
subjects_dir = data_path + '/subjects'
# Note that forward solutions can also be read with read_forward_solution
fwd = mne.make_forward_solution(raw_fname, trans, src, bem,
fname=None, meg=True, eeg=True, mindist=5.0,
n_jobs=2, overwrite=True)
# convert to surface orientation for better visualization
fwd = mne.convert_forward_solution(fwd, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
plt.show()
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.legend()
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.show()
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[0, 50, 100]))
| bsd-3-clause |
Cyber-Neuron/inception_v3 | inception/inception/image_processing.py | 1 | 20664 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Image are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('image_size', 299,
"""Provide square images of this size.""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 1,
"""Number of preprocessing threads per tower. """
"""Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 4,
"""Number of parallel readers during train.""")
# Images are preprocessed asynchronously using multiple threads specified by
# --num_preprocess_threads and the resulting processed images are stored in a
# random shuffling queue. The shuffling queue dequeues --batch_size images
# for processing on a given Inception tower. A larger shuffling queue guarantees
# better mixing across examples within a batch and results in slightly higher
# predictive performance in a trained model. Empirically,
# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
# 16GB. If the machine is memory limited, then decrease this factor to
# decrease the CPU memory footprint, accordingly.
tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
"""Size of the queue of preprocessed images. """
"""Default is ideal but try smaller values, e.g. """
"""4, 2 or 1, if host memory is constrained. See """
"""comments in code for more details.""")
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of ImageNet images for evaluation.
Use this function as the inputs for evaluating a network.
Note that some (minimal) image preprocessing occurs during evaluation
including central cropping and resizing of the image to fit the network.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
image_size, 3].
    labels: 1-D integer Tensor of [FLAGS.batch_size].
    filenames: 1-D string Tensor of [FLAGS.batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels, filenames = batch_inputs(
dataset, batch_size, train=False,
num_preprocess_threads=num_preprocess_threads,
num_readers=1)
return images, labels, filenames
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
"""Generate batches of distorted versions of ImageNet images.
Use this function as the inputs for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
dataset: instance of Dataset class specifying the dataset.
batch_size: integer, number of examples in batch
num_preprocess_threads: integer, total number of preprocessing threads but
None defaults to FLAGS.num_preprocess_threads.
Returns:
images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
FLAGS.image_size, 3].
labels: 1-D integer Tensor of [batch_size].
"""
if not batch_size:
batch_size = FLAGS.batch_size
# Force all input processing onto CPU in order to reserve the GPU for
# the forward inference and back-propagation.
with tf.device('/cpu:0'):
images, labels, _ = batch_inputs(
dataset, batch_size, train=True,
num_preprocess_threads=num_preprocess_threads,
num_readers=FLAGS.num_readers)
return images, labels
def decode_jpeg(image_buffer, scope=None):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def distort_color(image, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
thread_id: preprocessing thread ID.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.op_scope([image], scope, 'distort_color'):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
image: 3-D float Tensor of image
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# Display the bounding box in the first thread only.
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.image_summary('image_with_bounding_boxes', image_with_box)
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an allowed
# range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
tf.image_summary('images_with_distorted_bounding_box',
image_with_distorted_box)
# Crop the image to the specified bounding box.
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
resize_method = thread_id % 4
distorted_image = tf.image.resize_images(distorted_image, [height, width],
method=resize_method)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.image_summary('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, thread_id)
if not thread_id:
tf.image_summary('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def eval_image(image, height, width, scope=None):
"""Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.op_scope([image, height, width], scope, 'eval_image'):
# Crop the central region of the image with an area containing 87.5% of
# the original image.
image = tf.image.central_crop(image, central_fraction=0.875)
# Resize the image to the original height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
return image
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
"""Decode and preprocess one image for evaluation or training.
Args:
image_buffer: JPEG encoded string Tensor
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
train: boolean
thread_id: integer indicating preprocessing thread
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if bbox is None:
raise ValueError('Please supply a bounding box.')
image = decode_jpeg(image_buffer)
height = FLAGS.image_size
width = FLAGS.image_size
if train:
image = distort_image(image, height, width, bbox, thread_id)
else:
image = eval_image(image, height, width)
# Finally, rescale to [-1,1] instead of [0, 1)
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
def debug_print(y):
with tf.Session():
print(y.eval())
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/filename': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text'], features['image/filename']
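# For reference, a serialized Example carrying the fields parsed above could be
# built roughly like this (a sketch; `encoded_jpeg` is a placeholder for the
# raw JPEG bytes and the other values are illustrative only):
#
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'image/encoded': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
#       'image/class/label': tf.train.Feature(
#           int64_list=tf.train.Int64List(value=[615])),
#       'image/class/text': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[b'knee pad'])),
#       'image/filename': tf.train.Feature(
#           bytes_list=tf.train.BytesList(value=[b'ILSVRC2012_val_00041207.JPEG'])),
#       'image/object/bbox/xmin': tf.train.Feature(
#           float_list=tf.train.FloatList(value=[0.1])),
#       # ... ymin, xmax, ymax and the remaining fields follow the same pattern
#   }))
#   example_serialized = example.SerializeToString()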
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
num_readers=1):
"""Contruct batches of training or evaluation examples from the image dataset.
Args:
dataset: instance of Dataset class specifying the dataset.
See dataset.py for details.
batch_size: integer
train: boolean
num_preprocess_threads: integer, total number of preprocessing threads
num_readers: integer, number of parallel readers
Returns:
images: 4-D float Tensor of a batch of images
labels: 1-D integer Tensor of [batch_size].
    filenames: 1-D string Tensor of [batch_size].
Raises:
ValueError: if data is not found
"""
with tf.name_scope('batch_processing'):
data_files = dataset.data_files()
if data_files is None:
raise ValueError('No data files found for this dataset')
# Create filename_queue
if train:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=True,
capacity=16)
else:
filename_queue = tf.train.string_input_producer(data_files,
shuffle=False,
capacity=1)
if num_preprocess_threads is None:
num_preprocess_threads = FLAGS.num_preprocess_threads
if num_preprocess_threads % 4:
      pass  # no-op: this fork allows num_preprocess_threads that are not a multiple of 4
#raise ValueError('Please make num_preprocess_threads a multiple '
# 'of 4 (%d % 4 != 0).', num_preprocess_threads)
if num_readers is None:
num_readers = FLAGS.num_readers
if num_readers < 1:
raise ValueError('Please make num_readers at least 1')
# Approximate number of examples per shard.
examples_per_shard = 1024
# Size the random shuffle queue to balance between good global
# mixing (more examples) and memory use (fewer examples).
# 1 image uses 299*299*3*4 bytes = 1MB
# The default input_queue_memory_factor is 16 implying a shuffling queue
# size: examples_per_shard * 16 * 1MB = 17.6GB
min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
if train:
examples_queue = tf.RandomShuffleQueue(
capacity=min_queue_examples + 3 * batch_size,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string])
else:
examples_queue = tf.FIFOQueue(
capacity=examples_per_shard + 3 * batch_size,
dtypes=[tf.string])
# Create multiple readers to populate the queue of examples.
if num_readers > 1:
enqueue_ops = []
for _ in range(num_readers):
reader = dataset.reader()
_, value = reader.read(filename_queue)
enqueue_ops.append(examples_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(
tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
example_serialized = examples_queue.dequeue()
else:
reader = dataset.reader()
_, example_serialized = reader.read(filename_queue)
images_and_labels = []
for thread_id in range(num_preprocess_threads):
# Parse a serialized Example proto to extract the image and metadata.
image_buffer, label_index, bbox, _, filename = parse_example_proto(
example_serialized)
image = image_preprocessing(image_buffer, bbox, train, thread_id)
      images_and_labels.append([image, label_index, filename])
    images, label_index_batch, filenames = tf.train.batch_join(
images_and_labels,
batch_size=batch_size,
capacity=3 * num_preprocess_threads * batch_size)
# Reshape images into these desired dimensions.
height = FLAGS.image_size
width = FLAGS.image_size
depth = 3
images = tf.cast(images, tf.float32)
images = tf.reshape(images, shape=[batch_size, height, width, depth])
# Display the training images in the visualizer.
tf.image_summary('images', images)
return images, tf.reshape(label_index_batch, [batch_size]), tf.reshape(filenames, [batch_size])
| apache-2.0 |
GoogleCloudPlatform/public-datasets-pipelines | datasets/fec/pipelines/committee_contributions_2020/committee_contributions_2020_dag.py | 1 | 7284 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="fec.committee_contributions_2020",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
committee_contributions_2020_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="committee_contributions_2020_transform_csv",
startup_timeout_seconds=600,
name="committee_contributions_2020",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.fec.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.fec.gov/files/bulk-downloads/2020/pas220.zip",
"SOURCE_FILE_ZIP_FILE": "files/zip_file.zip",
"SOURCE_FILE_PATH": "files/",
"SOURCE_FILE": "files/itpas2.txt",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/fec/committee_contributions_2020/data_output.csv",
"PIPELINE_NAME": "committee_contributions_2020",
"CSV_HEADERS": '["cmte_id","amndt_ind","rpt_tp","transaction_pgi","image_num","transaction_tp","entity_tp", "name","city","state","zip_code","employer","occupation","transaction_dt","transaction_amt","other_id", "cand_id","tran_id","file_num","memo_cd","memo_text","sub_id"]',
},
resources={
"request_memory": "4G",
"request_cpu": "1",
"request_ephemeral_storage": "10G",
},
)
# Task to load CSV data to a BigQuery table
load_committee_contributions_2020_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_committee_contributions_2020_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/fec/committee_contributions_2020/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="fec.committee_contributions_2020",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "cmte_id",
"type": "string",
"description": "Filer Identification Number",
"mode": "nullable",
},
{
"name": "amndt_ind",
"type": "string",
"description": "Amendment Indicator",
"mode": "nullable",
},
{
"name": "rpt_tp",
"type": "string",
"description": "Report Type",
"mode": "nullable",
},
{
"name": "transaction_pgi",
"type": "string",
"description": "Primary-General Indicator",
"mode": "nullable",
},
{
"name": "image_num",
"type": "integer",
"description": "Image Number",
"mode": "nullable",
},
{
"name": "transaction_tp",
"type": "string",
"description": "Transaction Type",
"mode": "nullable",
},
{
"name": "entity_tp",
"type": "string",
"description": "Entity Type",
"mode": "nullable",
},
{
"name": "name",
"type": "string",
"description": "Recipient/Payee",
"mode": "nullable",
},
{
"name": "city",
"type": "string",
"description": "City/Town",
"mode": "nullable",
},
{
"name": "state",
"type": "string",
"description": "State",
"mode": "nullable",
},
{
"name": "zip_code",
"type": "string",
"description": "Zip Code",
"mode": "nullable",
},
{
"name": "employer",
"type": "string",
"description": "Employer",
"mode": "nullable",
},
{
"name": "occupation",
"type": "string",
"description": "Occupation",
"mode": "nullable",
},
{
"name": "transaction_dt",
"type": "date",
"description": "Transaction Date(MMDDYYYY)",
"mode": "nullable",
},
{
"name": "transaction_amt",
"type": "integer",
"description": "Transaction Amount",
"mode": "nullable",
},
{
"name": "other_id",
"type": "string",
"description": "Other Identification Number",
"mode": "nullable",
},
{
"name": "cand_id",
"type": "string",
"description": "Candidate Identification Number",
"mode": "nullable",
},
{
"name": "tran_id",
"type": "string",
"description": "Transaction ID",
"mode": "nullable",
},
{
"name": "file_num",
"type": "integer",
"description": "File Number / Report ID",
"mode": "nullable",
},
{
"name": "memo_cd",
"type": "string",
"description": "Memo Code",
"mode": "nullable",
},
{
"name": "memo_text",
"type": "string",
"description": "Memo Text",
"mode": "nullable",
},
{
"name": "sub_id",
"type": "integer",
"description": "FEC Record Number",
"mode": "required",
},
],
)
(
committee_contributions_2020_transform_csv
>> load_committee_contributions_2020_to_bq
)
| apache-2.0 |
davidam/python-examples | scikit/classify/document_clustering.py | 20 | 8531 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: they are based only on
cluster assignments rather than distances, and hence are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck
# License: BSD 3 clause
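# Minimal sketch of the two feature-extraction routes described in the
# docstring (illustrative only; the configuration actually used below is
# assembled from the command-line options):
#
#   tfidf = TfidfVectorizer(max_features=10000, stop_words='english',
#                           max_df=0.5, min_df=2)
#   hashing_with_idf = make_pipeline(
#       HashingVectorizer(n_features=10000, stop_words='english',
#                         alternate_sign=False, norm=None),
#       TfidfTransformer())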
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', alternate_sign=False,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
alternate_sign=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
# #############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| gpl-3.0 |
yonglehou/scikit-learn | sklearn/tests/test_isotonic.py | 228 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
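# The 13.95 expected in test_isotonic_sample_weight above is the weighted mean
# of the pooled violating block; a minimal arithmetic check (illustrative
# helper only, not used by the tests; relies on the module-level numpy import):
def _weighted_mean_of_pooled_block():
    # (41*2 + 51*3 + 1*4 + 2*5 + 5*6) / (2 + 3 + 4 + 5 + 6) = 279 / 20 = 13.95
    return np.average([41, 51, 1, 2, 5], weights=[2, 3, 4, 5, 6])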
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
ageron/tensorflow | tensorflow/python/distribute/input_lib.py | 3 | 19584 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various classes representing distributed inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_ops
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
class InputWorkers(object):
"""A 1-to-many mapping from input worker devices to compute devices."""
def __init__(self, device_map, worker_device_pairs=None, logical_device=0):
"""Initialize an `InputWorkers` object.
Args:
device_map: A `DeviceMap` with the computation devices fed by the
input workers.
worker_device_pairs: A sequence of pairs:
`(input device, a tuple of compute devices fed by that input device)`.
logical_device: The logical device of `device_map` to feed.
"""
self._device_map = device_map
self._logical_device = logical_device
if worker_device_pairs is None:
worker_device_pairs = ((
device_util.canonicalize("/device:CPU:0"),
device_map.logical_to_actual_devices(logical_device)),)
self._input_worker_devices = tuple(d for d, _ in worker_device_pairs)
self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f)
for _, f in worker_device_pairs)
flattened = tuple(d for l in self._fed_devices for d in l)
assert (flattened ==
device_map.logical_to_actual_devices(logical_device)), (
"flattened: %s logical device %d: %s" %
(flattened, logical_device,
device_map.logical_to_actual_devices(logical_device)))
@property
def device_map(self):
return self._device_map
@property
def logical_device(self):
return self._logical_device
@property
def num_workers(self):
return len(self._input_worker_devices)
@property
def worker_devices(self):
return self._input_worker_devices
def compute_devices_for_worker(self, worker_index):
return self._fed_devices[worker_index]
def __repr__(self):
devices = self.worker_devices
debug_repr = ",\n".join(" %d %s: %s" %
(i, devices[i], self._fed_devices[i])
for i in range(len(devices)))
return "%s:{\n%s\n device_map: %s}" % (
self.__class__.__name__, debug_repr, self._device_map)
class InputIterator(object):
"""An input iterator, intended to be passed to `DistributionStrategy.run`."""
def get_next(self):
"""Returns the next inputs for all replicas."""
raise NotImplementedError("must be implemented in descendants")
def initialize(self):
"""Initialize the underlying input dataset, when applicable.
In eager mode, this will create a new iterator and return it.
In graph mode, this will initialize the same underlying iterator(s).
Users are required to call this if
- This iterator was returned from a call to `make_input_fn_iterator` with an
input function that returns a dataset.
- Or this iterator was returned from a call to `make_dataset_iterator`.
Returns:
A list of initialization ops to be executed.
"""
raise NotImplementedError("must be implemented in descendants")
class InputIteratorImpl(InputIterator):
"""Common implementation for all input iterators."""
def __init__(self, input_workers, iterators):
assert isinstance(input_workers, InputWorkers)
if not input_workers.worker_devices:
raise ValueError("Should have at least one worker for input iterator.")
self._iterators = iterators
self._input_workers = input_workers
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
replicas = []
for i, worker in enumerate(self._input_workers.worker_devices):
if name is not None:
d = tf_device.DeviceSpec.from_string(worker)
new_name = "%s_%s_%d" % (name, d.job, d.task)
else:
new_name = None
with ops.device(worker):
# Make `replicas` a flat list of values across all replicas.
replicas.extend(self._iterators[i].get_next_as_list(new_name))
return values.regroup(self._input_workers.device_map, replicas)
def initialize(self):
"""Initialze underlying iterators.
Returns:
A list of any initializer ops that should be run.
"""
init_ops = []
for it in self._iterators:
init_ops.extend(it.initialize())
return init_ops
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_classes(self):
return self._iterators[0].output_classes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_shapes(self):
return self._iterators[0].output_shapes
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property
def output_types(self):
return self._iterators[0].output_types
# TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
def get_iterator(self, worker):
for i, w in enumerate(self._input_workers.worker_devices):
if worker == w:
return self._iterators[i]
return None
class InputFunctionIterator(InputIteratorImpl):
"""Iterator created from input function."""
def __init__(self, input_fn, input_workers, input_contexts):
"""Make an iterator for input provided via an input function.
Currently implements PER_WORKER mode, in which the `input_fn` is called
once on each worker.
TODO(priyag): Add other replication modes.
Args:
input_fn: Input function that returns a `tf.data.Dataset` object.
input_workers: an `InputWorkers` object.
input_contexts: A list of `InputContext` instances to be passed to call(s)
to `input_fn`. Length and order should match worker order in
`worker_device_pairs`.
"""
assert isinstance(input_workers, InputWorkers)
if input_workers.num_workers != len(input_contexts):
raise ValueError(
"Number of input workers (%d) is not same as number of "
"input_contexts (%d)" %
(input_workers.num_workers, len(input_contexts)))
iterators = []
for i, ctx in enumerate(input_contexts):
worker = input_workers.worker_devices[i]
with ops.device(worker):
result = input_fn(ctx)
devices = input_workers.compute_devices_for_worker(i)
if isinstance(result, dataset_ops.DatasetV2):
iterator = _SingleWorkerDatasetIterator(result, worker, devices)
elif callable(result):
iterator = _SingleWorkerCallableIterator(result, worker, devices)
else:
raise ValueError(
"input_fn must return a tf.data.Dataset or a callable.")
iterators.append(iterator)
super(InputFunctionIterator, self).__init__(input_workers, iterators)
class DatasetIterator(InputIteratorImpl):
"""Iterator created from input dataset."""
def __init__(self, dataset, input_workers, split_batch_by=None):
"""Make an iterator for the dataset on given devices.
If `split_batch_by` is not None, we "split" each batch of the
dataset by `split_batch_by` value. To achieve this, we first unbatch the
input dataset and then rebatch it with the per replica batch size that is
calculated using `global_batch_size // split_batch_by`.
The currently supported datasets are as follows:
`dataset.batch()` is the last operation on the dataset OR
`dataset.apply(map_and_batch)` is the last operation on the dataset OR
`dataset.batch().prefetch()` are the last 2 operations on the dataset OR
`dataset.apply(map_and_batch).prefetch()` are the last 2 operations.
TODO(priyag): Support multi worker / host cases properly by cloning
and sharding the dataset on each worker. Current setup will only work in
some cases, such as in-graph multi worker GPU case. If the input pipeline
has random shuffling (with a different seed on each worker), each worker
will see random input from the same overall dataset in each step. Otherwise,
each worker will see the same input in each step.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
split_batch_by: Optional integer. If present, we "split" each batch of the
dataset by `split_batch_by` value.
"""
assert isinstance(input_workers, InputWorkers)
if split_batch_by:
dataset = batching._RebatchDataset(dataset, split_batch_by) # pylint: disable=protected-access
iterators = []
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
worker_devices = input_workers.compute_devices_for_worker(i)
cloned_dataset = dataset
if not context.executing_eagerly():
cloned_dataset = input_ops._clone_dataset(dataset) # pylint: disable=protected-access
cloned_dataset = cloned_dataset.with_options(dataset.options())
iterator = _SingleWorkerDatasetIterator(cloned_dataset, worker,
worker_devices)
iterators.append(iterator)
self._element_structure = dataset._element_structure # pylint: disable=protected-access
super(DatasetIterator, self).__init__(input_workers, iterators)
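# The "split" described in the DatasetIterator docstring amounts to an unbatch
# followed by a rebatch at the per-replica batch size. A minimal sketch of that
# idea, assuming a TF 2.x-style `tf.data.Dataset` exposing `.unbatch()`; the
# helper is illustrative only and is not used by this module.
def _rebatch_sketch(dataset, global_batch_size, split_batch_by):
  # E.g. global_batch_size=8, split_batch_by=2 turns batches of 8 elements
  # into batches of 4, one per replica.
  return dataset.unbatch().batch(global_batch_size // split_batch_by)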
class _SingleWorkerDatasetIterator(object):
"""Iterator for a single `tf.data.Dataset`."""
def __init__(self, dataset, worker, devices):
"""Create iterator for the `dataset` to fetch data to worker's `devices` .
`MultiDeviceIterator` is used to prefetch input to the devices on the
given worker.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
"""
self._dataset = dataset
self._worker = worker
self._devices = devices
self._make_iterator()
def _make_iterator(self):
"""Make appropriate iterator on the dataset."""
with ops.device(self._worker):
self._iterator = multi_device_iterator_ops.MultiDeviceIterator(
self._dataset, self._devices)
def get_next_as_list(self, name=None):
"""Get next element from the underlying iterator."""
del name
with ops.device(self._worker):
data_list = self._iterator.get_next()
return data_list
def initialize(self):
"""Initialze underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run.
"""
if context.executing_eagerly():
self._iterator._eager_reset() # pylint: disable=protected-access
return []
else:
return [self._iterator.initializer]
@property
def output_classes(self):
return dataset_ops.get_legacy_output_classes(self._iterator)
@property
def output_shapes(self):
return dataset_ops.get_legacy_output_shapes(self._iterator)
@property
def output_types(self):
return dataset_ops.get_legacy_output_types(self._iterator)
class _SingleWorkerCallableIterator(object):
"""Iterator for a single tensor-returning callable."""
def __init__(self, fn, worker, devices):
self._fn = fn
self._worker = worker
self._devices = devices
def get_next_as_list(self, name=None):
"""Get next element from the callable."""
del name
with ops.device(self._worker):
data_list = [self._fn() for _ in self._devices]
return data_list
def initialize(self):
# TODO(petebu) Should this throw an exception instead?
return []
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_batched_dataset(d):
"""Get the batched dataset from `d`."""
# pylint: disable=protected-access
if isinstance(d, dataset_ops.DatasetV1Adapter):
d = d._dataset
if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
return d
elif isinstance(d, (dataset_ops.PrefetchDataset,
dataset_ops._OptionsDataset)):
return _get_batched_dataset(d._input_dataset)
raise ValueError(
"Unable to get batched dataset from the input dataset. `batch` "
"`map_and_batch` need to be the last operations on the dataset. "
"The batch operations can be followed by a prefetch.")
def _get_batched_dataset_attributes(d):
"""Get `batch_size`, `drop_remainder` of dataset."""
# pylint: disable=protected-access
assert isinstance(d,
(dataset_ops.BatchDataset, batching._MapAndBatchDataset))
if isinstance(d, dataset_ops.BatchDataset):
batch_size = d._batch_size
drop_remainder = d._drop_remainder
elif isinstance(d, batching._MapAndBatchDataset):
batch_size = d._batch_size_t
drop_remainder = d._drop_remainder_t
# pylint: enable=protected-access
if tensor_util.is_tensor(batch_size):
batch_size = tensor_util.constant_value(batch_size)
if tensor_util.is_tensor(drop_remainder):
drop_remainder = tensor_util.constant_value(drop_remainder)
return batch_size, drop_remainder
# TODO(sourabhbajaj): Remove this in lieu of distributed datasets
def _get_dataset_attributes(dataset):
"""Get the underlying attributes from the dataset object."""
# pylint: disable=protected-access
# First, get batch_size and drop_remainder from the dataset. We need
# to walk back the dataset creation process and find the batched version in
# order to get the attributes.
batched_dataset = _get_batched_dataset(dataset)
batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset)
  # Second, the prefetch buffer size should be obtained from the original dataset.
prefetch_buffer = None
if isinstance(dataset, dataset_ops.PrefetchDataset):
prefetch_buffer = dataset._buffer_size
elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
prefetch_buffer = dataset._dataset._buffer_size
return batch_size, drop_remainder, prefetch_buffer
class MultiStepContext(object):
"""A context object that can be used to capture things when running steps.
This context object is useful when running multiple steps at a time using the
  `experimental_run_steps_on_iterator` API. For example, it allows the user's step
  function to specify which outputs to emit at what frequency. Currently it
  supports capturing output from the last step, as well as capturing non tensor
  outputs. In the future it will be augmented to support other use cases such
  as output every N steps.
"""
def __init__(self):
"""Initialize an output context.
Returns:
A context object.
"""
self._last_step_outputs = {}
self._last_step_outputs_reduce_ops = {}
self._non_tensor_outputs = {}
@property
def last_step_outputs(self):
"""A dictionary consisting of outputs to be captured on last step.
Keys in the dictionary are names of tensors to be captured, as specified
when `set_last_step_output` is called.
Values in the dictionary are the tensors themselves. If
`set_last_step_output` was called with a `reduce_op` for this output,
then the value is the reduced value.
Returns:
A dictionary with last step outputs.
"""
return self._last_step_outputs
def _set_last_step_outputs(self, outputs):
"""Replace the entire dictionary of last step outputs."""
if not isinstance(outputs, dict):
raise ValueError("Need a dictionary to set last_step_outputs.")
self._last_step_outputs = outputs
def set_last_step_output(self, name, output, reduce_op=None):
"""Set `output` with `name` to be outputted from the last step.
Args:
name: String, name to identify the output. Doesn't need to match tensor
name.
output: The tensors that should be outputted with `name`. See below for
actual types supported.
reduce_op: Reduction method to use to reduce outputs from multiple
replicas. Required if `set_last_step_output` is called in a replica
context. Optional in cross_replica_context.
When present, the outputs from all the replicas are reduced using the
current distribution strategy's `reduce` method. Hence, the type of
`output` must be what's supported by the corresponding `reduce` method.
      For example, if using MirroredStrategy and reduction is set, output
must be a `PerReplica` value.
      The reduce method is also recorded in a dictionary
      `_last_step_outputs_reduce_ops` so that the outputs can later be
      interpreted as already reduced or not.
"""
if distribution_strategy_context.in_cross_replica_context():
self._last_step_outputs_reduce_ops[name] = reduce_op
if reduce_op is None:
self._last_step_outputs[name] = output
else:
distribution = distribution_strategy_context.get_strategy()
self._last_step_outputs[name] = distribution.reduce(reduce_op, output)
else:
assert reduce_op is not None
def merge_fn(distribution, value):
self._last_step_outputs[name] = distribution.reduce(reduce_op, value)
# Setting this inside the `merge_fn` because all replicas share the same
# context object, so it's more robust to set it only once (even if all
# the replicas are trying to set the same value).
self._last_step_outputs_reduce_ops[name] = reduce_op
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
@property
def non_tensor_outputs(self):
"""A dictionary consisting of any non tensor outputs to be captured."""
return self._non_tensor_outputs
def set_non_tensor_output(self, name, output):
"""Set `output` with `name` to be captured as a non tensor output."""
if distribution_strategy_context.in_cross_replica_context():
self._non_tensor_outputs[name] = output
else:
def merge_fn(distribution, value):
# NOTE(priyag): For non tensor outputs, we simply return all the values
# in a list as reduction doesn't make sense on non tensors.
self._non_tensor_outputs[name] = distribution.unwrap(value)
distribution_strategy_context.get_replica_context().merge_call(
merge_fn, args=(output,))
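# A minimal usage sketch for MultiStepContext, assuming it is driven from a
# cross-replica context; `loss_tensor` is a placeholder for whatever the user's
# step function produced, and the helper is illustrative only.
def _multi_step_context_sketch(loss_tensor):
  ctx = MultiStepContext()
  # With reduce_op=None in a cross-replica context the value is stored as-is.
  ctx.set_last_step_output("loss", loss_tensor, reduce_op=None)
  ctx.set_non_tensor_output("note", "any python object can be captured here")
  return ctx.last_step_outputs, ctx.non_tensor_outputs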
| apache-2.0 |
ResidentMario/geoplot | examples/plot_los_angeles_flights.py | 1 | 1907 | """
Sankey of Los Angeles flight volumes with Cartopy globes
========================================================
This example plots passenger volumes for commercial flights out of Los Angeles International
Airport. Some globe-modification options available in ``cartopy`` are demonstrated. Visit
`the cartopy docs <https://scitools.org.uk/cartopy/docs/latest/matplotlib/feature_interface.html>`_
for more information.
"""
import geopandas as gpd
import geoplot as gplt
import geoplot.crs as gcrs
import matplotlib.pyplot as plt
import cartopy
import mapclassify as mc
la_flights = gpd.read_file(gplt.datasets.get_path('la_flights'))
scheme = mc.Quantiles(la_flights['Passengers'], k=5)
f, axarr = plt.subplots(2, 2, figsize=(12, 12), subplot_kw={
'projection': gcrs.Orthographic(central_latitude=40.7128, central_longitude=-74.0059)
})
plt.suptitle('Popular Flights out of Los Angeles, 2016', fontsize=16)
plt.subplots_adjust(top=0.95)
ax = gplt.sankey(
la_flights, scale='Passengers', hue='Passengers', cmap='Purples', scheme=scheme, ax=axarr[0][0]
)
ax.set_global()
ax.outline_patch.set_visible(True)
ax.coastlines()
ax = gplt.sankey(
la_flights, scale='Passengers', hue='Passengers', cmap='Purples', scheme=scheme, ax=axarr[0][1]
)
ax.set_global()
ax.outline_patch.set_visible(True)
ax.stock_img()
ax = gplt.sankey(
la_flights, scale='Passengers', hue='Passengers', cmap='Purples', scheme=scheme, ax=axarr[1][0]
)
ax.set_global()
ax.outline_patch.set_visible(True)
ax.gridlines()
ax.coastlines()
ax.add_feature(cartopy.feature.BORDERS)
ax = gplt.sankey(
la_flights, scale='Passengers', hue='Passengers', cmap='Purples', scheme=scheme, ax=axarr[1][1]
)
ax.set_global()
ax.outline_patch.set_visible(True)
ax.coastlines()
ax.add_feature(cartopy.feature.LAND)
ax.add_feature(cartopy.feature.OCEAN)
ax.add_feature(cartopy.feature.LAKES)
ax.add_feature(cartopy.feature.RIVERS)
| mit |
WMD-Bath/dvxc | rvo_stats.py | 3 | 36130 | #! /usr/bin/env python
# rvo_stats.py
# Copyright 2015 Adam J. Jackson and Jonathan M. Skelton
from __future__ import print_function
import csv
import math
import numpy as np
from collections import namedtuple
import matplotlib as mpl
import matplotlib.pyplot as plt
# from matplotlib.ticker import FuncFormatter
import rvo
from rvo import EVPerCubicAngstromInGPa
### set up data files ###
# src_dir = os.path.dirname(__file__)
# # Append a trailing slash to make coherent directory name - this would select
# # the root directory in the case of no prefix, so we need to check
# if src_dir:
# src_dir = src_dir + '/'
# data_dir = src_dir + '../data/'
### Plotting configuration ###
functional_colours = {
'LDA' : ( 0, 0, 204),
'PW91' : (204, 0, 0),
'PBE' : ( 0, 153, 0),
'PBEsol' : (112, 48, 160),
'TPSS' : (255, 192, 0),
'revTPSS' : (214, 0, 147),
'PBE+D2' : ( 0, 176, 240),
'B3LYP' : (100, 100, 50),
'HSE06' : ( 50, 80, 50)
}
functional_markers = { 'LDA' : "^", 'PW91' : "o", 'PBE' : "s", 'PBEsol' : "D",
'TPSS' : "x", 'revTPSS' : "p", 'PBE+D2' : "+" ,
'B3LYP' : "<", 'HSE06' : ">"}
iter_plot_bar_hatches = [None, "//", "/", "---", 'o', 'oo']
# Global initialisation of Matplotlib settings and variables/objects; the font
# size is kept as a variable, because calls to plt.legend() don't seem to use
# the rc settings. COMMENT: Note that the line width *is* hard-coded in
# places, so changing the variable here won't update everything on the plots.
mpl.rc('font', **{ 'family' : 'sans-serif', 'size' : 10,
'sans-serif' : 'Arial' })
mpl.rc('lines', **{ 'linewidth' : 0.5 })
font_size = 10
global data_dir
def main(verbosity=False, to_plot=["none"], data_path="",
to_write=["none"], compounds=False, compact=False):
global data_dir
data_dir = data_path
if data_dir[-1] != '/':
data_dir += '/'
all_data_sets = [
("PbS", data_dir + u"PbS-EVPData.csv"),
("PbTe", data_dir + u"PbTe-EVPData.csv"),
("ZnS", data_dir + u"ZnS-EVPData.csv"),
("ZnTe", data_dir +u"ZnTe-EVPData.csv")
]
### Set up verbose printing ###
if verbosity:
def vprint(*args):
for arg in args:
print(arg, end=' ')
print('')
else:
def vprint(*args):
pass
### Let "none" override other output selections
if "none" in to_plot:
to_plot = ["none"]
if "none" in to_write:
to_write = ["none"]
### Trim down data_sets if requested: ###
# global data_sets # Python is weird about global variables:
# # this line is needed by the "if" statement but not the
# # "for" loop!
if compounds:
data_sets = [entry for entry in all_data_sets if entry[0] in compounds]
else:
data_sets = all_data_sets
# Main loop: Simulated RVO over selected materials
for output_prefix, filename in data_sets:
### Import data and find optimal volumes ###
print("Processing: {0}\n".format(filename))
vprint(" -> Reading input data...")
functionals, data = read_dataset(filename)
vprint(" -> Determining Veq by Murnaghan fitting...")
eos_fits = {}
for functional in functionals:
murnaghan_params = rvo.murnaghan_fit(data[functional].e_values,
data[functional].v_values)
vprint(" -> {0}: {1:.2f} A^3 (RMS = {2:.2e})".format(
functional, murnaghan_params.v0, murnaghan_params.eRMS))
eos_fits[functional] = murnaghan_params
vprint("\n")
if "eos" in to_plot or "all" in to_plot:
plot_filename = "{0}_EoS.png".format(output_prefix)
print(" -> Plotting {0}...".format(plot_filename))
plot_EVP_data(functionals, data, eos_fits,
plot_filename=plot_filename, compact=compact)
if "eos" in to_write or "all" in to_write:
out_filename = "{0}_EoS.csv".format(output_prefix)
print(" -> Outputting {0}...".format(out_filename))
with open(out_filename, 'w') as f:
csv_writer = csv.writer(f, delimiter = ',',quotechar = '\"',
quoting = csv.QUOTE_ALL)
csv_writer.writerow(["Functional", "E0 / eV", "V0 / A^3",
"K0 / GPa", "K'0", "RMS Error"])
for functional in functionals:
fit = eos_fits[functional]
csv_writer.writerow([functional, fit.e0, fit.v0,
fit.k0 * EVPerCubicAngstromInGPa,
fit.kPrime0, fit.eRMS])
### Carry out RVO method for all functionals ###
print(" -> Applying RVO correction to all " +
"combinations of functionals...")
RvoResult = namedtuple('RvoResult', 'functional delta_v residual_p')
rvo_results={}
for test_functional in functionals:
initial_v_values = data[test_functional].v_values
initial_p_values = data[test_functional].p_values
delta_v = {}
residual_p = {}
# Compare with all other functionals
            for ref_functional in (f for f in functionals if f != test_functional):
dv = [rvo.apply_rvo_murnaghan(p, eos_fits[ref_functional]) \
for p in initial_p_values]
p = [rvo.murnaghan_pressure(v + dv_v,
eos_fits[test_functional]) \
for (v, dv_v) in zip(initial_v_values, dv)]
delta_v.update({ref_functional: dv})
residual_p.update({ref_functional: p})
rvo_results.update({test_functional: RvoResult(
test_functional, delta_v, residual_p)})
if "rvo" in to_write:
### write out table of delta p xc corrections ###
out_filename = "{0}_rvo.csv".format(output_prefix)
vprint(" --> Writing to file {0}...".format(out_filename))
with open(out_filename, 'w') as f:
csv_writer = csv.writer(f, delimiter = ',')
csv_writer.writerow(["Test functional", "Reference functional",
"v / AA^3", "p / kbar", "Delta V / AA^3",
"residual pressure / kbar"])
for test_functional in functionals:
for ref_functional in rvo_results[test_functional].delta_v:
# Zip together numbers and concatenate to labels
for row in zip(
data[test_functional].v_values,
data[test_functional].p_values,
rvo_results[test_functional].delta_v[ref_functional],
rvo_results[test_functional].residual_p[ref_functional]
):
# print(row)
# print(list(row))
csv_writer.writerow([test_functional,
ref_functional] +
list(row))
# Iterative procedure
print(" -> Applying correction iteratively to all combinations")
iter_results = {}
for functional in functionals:
iter_results.update({functional:{}})
            for ref_functional in (f for f in functionals if f != functional):
iter_result = sim_iterative_rvo(eos_fits[functional],
eos_fits[ref_functional],
max_iter=5)
iter_results[functional].update({ref_functional:iter_result})
for option in ("iterative_p", "iterative_v", "all"):
if option in to_write:
print("Sorry, data file not yet available for iterations")
if "iterative_p" in to_plot or "all" in to_plot:
filename = "{0}_Iterative_P.png".format(output_prefix)
print(" -> Plotting iterative P results to {0}".format(filename))
plot_iterative_results(functionals, iter_results, eos_fits,
filename, mode='pressure')
if "iterative_v" in to_plot or "all" in to_plot:
filename = "{0}_Iterative_V.png".format(output_prefix)
print(" -> Plotting iterative V results to {0}".format(filename))
plot_iterative_results(functionals, iter_results, eos_fits,
filename, mode='volume')
### Sensitivity to initial distance ###
# Calculate volume shift corresponding to an RVO calculation at each
# point of one data set, calculate residual pressure associated with
# this point from fit
iterations = 4
# Check if this is required:
if ("v_sensitivity" in to_plot or
"all" in to_plot or
"v_sensitivity" in to_write or
"all" in to_write):
for (test_functional, ref_functional) in (
("B3LYP", "PW91"),
("HSE06", "PBE")
):
(v_init, v_corrected) = v_sensitivity(eos_fits[test_functional],
eos_fits[ref_functional],
range_percent=3,
iterations=iterations)
if "v_sensitivity" in to_plot or "all" in to_plot:
filename = "{0}_{1}_Initial_V_sensitivity.png".format(
output_prefix, test_functional)
print((" -> Plotting V sensitivity for {0} " +
"(ref. {1}) to {2}").format(test_functional,
ref_functional,
filename))
plot_v_sensitivity(v_init,v_corrected,
test_functional,ref_functional,
v0=eos_fits[test_functional].v0,
filename=filename)
if "v_sensitivity" in to_write or "all" in to_write:
filename = "{0}_{1}_Initial_V_sensitivity.csv".format(
output_prefix, test_functional)
print((" -> Writing V sensitivity data for {0} " +
"(ref. {1}) to {2}").format(test_functional,
ref_functional,
filename))
with open(filename,'w') as f:
csv_writer = csv.writer(f, delimiter = ',')
header = (["v_initial / AA"] +
["v_corrected (iteration {0})".format(x)
for x in range(1,iterations+1)])
csv_writer.writerow(header)
for i in range(len(v_init)):
csv_writer.writerow(
[v_init[i]] + list(v_corrected[i,:])
)
print('\n')
if "v_bandgap" in to_plot or "all" in to_plot:
plot_bandgaps([x[0] for x in data_sets], do_plot=True,
plot_filename="v-bandgaps.png", compact=compact)
def read_dataset(filename):
"""Read in data from CSV file
Arguments: "filename" (text string containing path to file)
Returns: tuple (functionals, datasets)
where "functionals" is a list of functional names and "datasets" is a
set of "DataSet" namedtuple objects containing v_values, e_values and
p_values (numpy arrays of volumes, energies and pressures in units of
eV and angstrom)
Required file format:
Line 1: Functional1,,,Functional2,,, ... ,FunctionalN,,
Line 2: (unused, contain units for readability)
Remaining lines (data): volume, energy, p_ext, volume, energy, p_ext ...
"""
with open(filename, 'r') as f:
input_reader_csv = csv.reader(f)
# Read functional names from first line
functionals = next(input_reader_csv)[::3]
# Skip second line
next(input_reader_csv)
#fill datasets
datasets = {}
DataSet = namedtuple('DataSet', 'v_values e_values p_values')
for functional in functionals:
datasets[functional] = DataSet([],[],[])
for row in input_reader_csv:
for i, name in enumerate(functionals):
datasets[name].v_values.append(float(row[i*3]))
datasets[name].e_values.append(float(row[i*3+1]))
datasets[name].p_values.append(float(row[i*3+2]))
# Finally, convert to numpy arrays
for functional in functionals:
datasets[functional] = DataSet(
np.array(datasets[functional].v_values),
np.array(datasets[functional].e_values),
np.array(datasets[functional].p_values)
)
return (functionals, datasets)
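# A minimal usage sketch for read_dataset, assuming a CSV laid out as described
# in the docstring above; the file name is a placeholder and the helper is
# illustrative only.
def _example_read_dataset(csv_path="PbS-EVPData.csv"):
    functionals, datasets = read_dataset(csv_path)
    # e.g. functionals -> ['LDA', 'PBE', ...]; v_values are volumes in AA^3
    return functionals, datasets[functionals[0]].v_values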
def plot_EVP_data(functionals, evp_data, eos_fits, plot_filename=False,
compact=False):
"""
Plot energy-volume and pressure-volume curves
Arguments:
functionals: list of named functionals
evp_data: dict of namedtuple objects containing lists of values:
{'functional1':DataSet([v1,v2,v3...],[e1,e2,e3...],[p1,p2,p3...],
'functional2': ...}
where DataSet = namedtuple('DataSet', 'v_values e_values p_values')
eos_fits: dict of namedtuple objects containing Murnaghan parameters:
{'functional1': MurnaghanFit('e0, v0, k0, kPrime0, eRMS'),
'functional2': ...} where
MurnaghanFit = namedtuple('MurnaghanFit', 'e0 v0 k0 kPrime0 eRMS')
plot_filename: File name/path for plotted output. If False, plot is
displayed onscreen instead.
compact: [boolean] If True, show 2 plots instead of 4 (for publication)
"""
if compact:
fig_dimensions = (8 / 2.54, 14 / 2.54)
else:
fig_dimensions = (17.2 / 2.54, 12 / 2.54)
plt.figure(figsize = fig_dimensions)
subplot_axes = []
### Plot 1: E-V curve ###
if compact:
plt.subplot(2,1,1)
else:
plt.subplot(2,2,1)
for functional in functionals:
(r,g,b), marker = (functional_colours[functional],
functional_markers[functional])
plt.plot(evp_data[functional].v_values,
evp_data[functional].e_values - eos_fits[functional].e0,
color = (r / 255.0, g / 255.0, b / 255.0),
marker = marker, fillstyle = 'none',
label=functional)
plt.xlabel(r"$V$ / $\AA^{3}$", fontweight = 'bold')
plt.ylabel("$E - E_{0}$ / eV", fontweight = 'bold')
x_ticks = plt.xticks()[0]
x_tick_spacing = x_ticks[1] - x_ticks[0]
y_min, y_max = plt.ylim()
plt.ylim(0.0, y_max)
axes=plt.gca()
subplot_axes.append(axes)
if compact:
handles, labels = axes.get_legend_handles_labels()
# Print legends for first half of list
# N.B. // is notation for integer floor division
axes.legend(handles[:len(handles) // 2], labels[:len(handles) // 2],
loc = 'upper right', frameon = False,
prop = {'size' : font_size})
### Plot 2: Relative volume ###
if not compact:
plt.subplot(2,2,3)
x_min, x_max = None, None
for functional in functionals:
v_values_adjusted = (evp_data[functional].v_values
- eos_fits[functional].v0)
e_values_adjusted = (evp_data[functional].e_values
- eos_fits[functional].e0)
(r,g,b), marker = (functional_colours[functional],
functional_markers[functional])
plt.plot(v_values_adjusted, e_values_adjusted,
color = (r / 255.0, g /255.0, b / 255.0),
marker=marker, fillstyle='none')
x_min = np.min(v_values_adjusted) if not x_min \
else min(x_min, np.min(v_values_adjusted))
x_max = np.max(v_values_adjusted) if not x_max \
else max(x_max, np.max(v_values_adjusted))
plt.xlabel(r"$V - V_{0}$ / $\AA^{3}$", fontweight = 'bold')
plt.ylabel("$E - E_{0}$ / eV", fontweight = 'bold')
plot_x_min = math.floor(x_min / x_tick_spacing) * x_tick_spacing
plot_x_max = math.ceil(x_max / x_tick_spacing) * x_tick_spacing
plt.xlim(plot_x_min, plot_x_max)
y_min, y_max = plt.ylim()
plt.ylim(0.0, y_max)
subplot_axes.append(plt.gca())
### Plot 3: P-V curve ###
if compact:
plt.subplot(2,1,2)
else:
plt.subplot(2,2,2)
def zeroline():
plt.axhline(y=0,color='k',linestyle='-')
zeroline()
for functional in functionals:
(r,g,b), marker = (functional_colours[functional],
functional_markers[functional])
plt.plot(evp_data[functional].v_values,
evp_data[functional].p_values / 10,
label = functional,
color = (r / 255.0, g / 255.0, b / 255.0),
marker = marker, fillstyle = 'none' )
plt.xlabel(r"$V$ / $\AA^{3}$", fontweight = 'bold')
plt.ylabel("$P$ / GPa", fontweight = 'bold')
axes = plt.gca()
handles, labels = axes.get_legend_handles_labels()
if compact:
# Print legends for second half of list
axes.legend(handles[len(handles) // 2:],
labels[len(handles) // 2:],
loc = 'upper right', frameon = False,
prop = { 'size' : font_size })
else:
# Print legends for first half of list
# N.B. // is notation for integer floor division
axes.legend(handles[:len(handles) // 2], labels[:len(handles) // 2],
loc = 'upper right', frameon = False,
prop = {'size' : font_size})
subplot_axes.append(axes)
### Plot 4: P-V for normalised V ###
if not compact:
plt.subplot(2,2,4)
x_min, x_max = None, None
zeroline()
for functional in functionals:
v_values_adjusted = (evp_data[functional].v_values
- eos_fits[functional].v0)
(r,g,b), marker = (functional_colours[functional],
functional_markers[functional])
x_min = np.min(v_values_adjusted) if not x_min \
else min(x_min, np.min(v_values_adjusted))
x_max = np.max(v_values_adjusted) if not x_max \
else max(x_max, np.max(v_values_adjusted))
plt.plot(evp_data[functional].v_values - eos_fits[functional].v0,
evp_data[functional].p_values / 10.0,
label=functional,
color = (r / 255.0, g /255.0, b / 255.0),
marker=marker, fillstyle='none')
plt.xlabel(r"$V - V_{0}$ / $\AA^{3}$", fontweight = 'bold')
plt.ylabel("$P$ / GPa", fontweight = 'bold')
plot_x_min = math.floor(x_min / x_tick_spacing) * x_tick_spacing
plot_x_max = math.ceil(x_max / x_tick_spacing) * x_tick_spacing
plt.xlim(plot_x_min, plot_x_max)
axes = plt.gca()
handles, labels = axes.get_legend_handles_labels()
axes.legend(handles[len(handles) // 2:],
labels[len(handles) // 2:],
loc = 'upper right', frameon = False,
prop = { 'size' : font_size })
subplot_axes.append(axes)
### Finish up plots ###
for axes in subplot_axes:
for spine in axes.spines.values():
spine.set_linewidth(0.5)
plt.tight_layout()
if plot_filename:
plt.savefig(plot_filename, format = 'png', dpi = 300)
else:
plt.show()
plt.close()
def sim_iterative_rvo(test_functional_murnaghan_fit,
ref_functional_murnaghan_fit,
max_iter = 5):
"""Simulate the iterative application of RVO procedure
Rather than carry out new calculations for every combination of
functionals, we use a Murnaghan fit for both the test functional and the
reference functional. Calculated pressure values from the reference
functional fit are a stand-in for what would be a quantum chemical
calculation at the volume predicted by the delta V xc procedure.
Arguments:
test_functional_murnaghan_fit: a MurnaghanFit namedtuple, containing
the fitted parameters for a Murnaghan EoS describing the P-V curve
to be minimised. (i.e. results from an expensive DFT functional.)
ref_functional_murnaghan_fit: a corresponding MurnaghanFit namedtuple,
parameterising an EoS to be used in the RVO correction.
(e.g. results from an inexpensive DFT functional.)
max_iter: Integer number of iterative steps to carry out.
Returns:
v_iterations: A list [v0, v1, v2 ... ] of volumes corresponding to the
iterative steps taken.
p_iterations: A list [p0, p1, p2 ... ] of pressures corresponding to
iterative steps taken. Beware that these are in the units of the
EoS, typically eV AA^-3.
"""
try:
max_iter = int(max_iter)
except ValueError:
raise Exception(
"Couldn't use '{0}' as a number of iterations".format(max_iter))
# Initialise arrays and variables
v = ref_functional_murnaghan_fit.v0
p = rvo.murnaghan_pressure(v, test_functional_murnaghan_fit)
v_iterations = [v] + [False]*(max_iter)
p_iterations = [p] + [False]*(max_iter)
for index in range(1,max_iter+1):
delta_v = rvo.apply_rvo_murnaghan(p, ref_functional_murnaghan_fit)
v = v + delta_v
p = rvo.murnaghan_pressure(v, test_functional_murnaghan_fit)
v_iterations[index] = v
p_iterations[index] = p
return (v_iterations, p_iterations)
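# A minimal sketch of driving sim_iterative_rvo with hand-made Murnaghan
# parameters. The field layout (e0 v0 k0 kPrime0 eRMS) follows the MurnaghanFit
# namedtuple documented in plot_EVP_data above; the numbers are invented purely
# for illustration and do not correspond to any real material.
def _example_sim_iterative_rvo():
    FakeFit = namedtuple('FakeFit', 'e0 v0 k0 kPrime0 eRMS')
    test_fit = FakeFit(e0=0.0, v0=52.0, k0=0.50, kPrime0=4.5, eRMS=0.0)
    ref_fit = FakeFit(e0=0.0, v0=50.0, k0=0.45, kPrime0=4.0, eRMS=0.0)
    # Residual pressures (eV AA^-3) should shrink towards zero over iterations.
    return sim_iterative_rvo(test_fit, ref_fit, max_iter=4)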
def plot_iterative_results(functionals, iter_results, eos_fits,
filename, mode='pressure', n_columns=3):
"""Plot values from iterative application of RVO
Arguments:
functionals: list of functionals to include in plot
iter_results: dict of dict of tuples containing result lists
{test_functional: {ref_functional: ([v0,v1,v2...],
[p0,p1,p2...])
, ...}
, ...}
filename: string containing target plot file path
mode: string 'volume' or 'pressure', denoting property to plot
[default value = 'pressure']
n_columns: width of plot array [default value = 3]
Raises:
Exception "Plotting mode [mode] not accepted." if invalid mode
Exception "Couldn't convert [n_columns] to a meaningful number of
columns." if n_columns can't be converted to an int.
Exception "No data for test functional [functional name]"; functional
appears in list "functionals" but not "iter_results"
Exception "[test functional] has not been tested with [ref functional]"
; [test functional] appears in iter_results, but does not appear
in the results for [ref functional].
"""
# Check input
supported_modes = ('volume', 'pressure')
if mode not in supported_modes:
raise Exception("Plotting mode '{0}' not accepted.".format(mode) +
"Valid output modes: {0}".format(supported_modes))
try:
n_columns = int(n_columns)
except ValueError:
raise Exception("Couldn't convert '{0}' to ".format(n_columns)
+ "a meaningful number of columns.")
# Check requested set of functionals are in results
for test_functional in functionals:
try:
test_results = iter_results[test_functional]
except KeyError:
raise Exception(
"No data for test functional {0}".format(test_functional))
for ref_functional in (x for x in functionals if x != test_functional):
try:
ref_result = iter_results[test_functional][ref_functional]
            except KeyError:
                raise Exception("{0} has not been ".format(test_functional) +
                                "tested with ref functional " +
                                "{0}".format(ref_functional))
# Work out number of rows given n_columns
n_rows, remainder = divmod(len(iter_results), n_columns)
if remainder: n_rows += 1
plt.figure(figsize = (25.8 / 2.54, (6.5 * n_rows) / 2.54))
subplot_axes = []
# Take number of bars for first combination, assume(!) rest are consistent
n_bars = len(iter_results[functionals[0]][functionals[1]][0])
n_groups = len(functionals)
# Set other plot parameters
bar_width = 0.8
# Work out bar positions
left_edges_base = [(bar_width
+ i * n_bars * bar_width +
i * bar_width) for i in range(n_groups)]
# Get plotting
for subplot_index, test_functional in enumerate(functionals):
plot_data = iter_results[test_functional]
# Modification to (potentially) make the layout look prettier: If the last
# plot is on its own in a row with an odd number of columns, put it in the
# middle.
if (n_columns % 2 != 0
and subplot_index == len(functionals) - 1
and len(functionals) % n_columns == 1):
plt.subplot(n_rows, n_columns,
subplot_index + 1 + (n_columns - 1) // 2)
else:
plt.subplot(n_rows, n_columns, subplot_index + 1)
for group_index, ref_functional in enumerate(
[f for f in functionals if f != test_functional]):
if mode == 'volume':
volumes = plot_data[ref_functional][0]
v0 = eos_fits[test_functional].v0
bar_heights = [abs(v - v0) for v in volumes]
elif mode == 'pressure':
bar_heights = [abs(p)/10.0 for p in plot_data[ref_functional][1]]
left_edges = [(left_edges_base[group_index]
+ bar_index * bar_width
) for bar_index in range(len(bar_heights))]
r, g, b = functional_colours[ref_functional]
bar_group_colour = (r / 255.0, g / 255.0, b / 255.0)
# Individual bars are plotted separately in order to set the hatch
# styles. The "bottom" argument needs to be set to a small (but
# non-zero) number due to some Matplotlib issues with plt.bar() and
# log scale y axes. 10^-3 was chosen as 0.001 % / 0.001 GPa is a
# very high error margin for predicting volumes/pressures!
lower_lim = 1E-5
for bar_index in range(len(bar_heights)):
plt.bar(left_edges[bar_index],
bar_heights[bar_index],
bottom = lower_lim, color = bar_group_colour,
edgecolor = bar_group_colour, linewidth = 0.5,
fill = iter_plot_bar_hatches[bar_index] == None,
hatch = iter_plot_bar_hatches[bar_index])
# Add outline in independent colour
plt.bar(left_edges[bar_index], bar_heights[bar_index],
bottom = lower_lim, edgecolor = 'k', linewidth = 0.5, fill = False)
plt.title("Reference: {0}".format(test_functional),
size = font_size, fontweight = 'bold')
if mode == 'volume':
plt.ylabel(r"abs($\Delta V$) / %", fontweight = 'bold')
elif mode == 'pressure':
plt.ylabel(r"$\mathrm{abs}(P)$ / GPa", fontweight = 'bold')
plt.xticks([value + (bar_width * n_bars) / 2.0 for value in left_edges_base],
[f for f in functionals if f != test_functional],
rotation = 30, ha='center')
# plt.xlim(0.0, left_edges_base[-1] +
# n_bars * bar_width + bar_width)
plt.xlim(0.0, left_edges_base[-1])
subplot_axes.append(plt.gca())
for axes in subplot_axes:
axes.set_ylim(lower_lim, 10.0)
axes.set_yscale('log', noposy = 'clip')
for spine in axes.spines.values():
spine.set_linewidth(0.5)
plt.tight_layout()
plt.savefig(filename, format = 'png', dpi = 300)
def v_sensitivity(test_functional_fit, ref_functional_fit,
range_percent=3, points=100, iterations=4):
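    """Scan a range of starting volumes and apply the RVO correction repeatedly.
    Starting volumes span +/- range_percent percent of the test functional's
    fitted v0; for each starting volume the correction is applied `iterations`
    times against the reference functional's equation of state.
    Returns:
        (v_init, v_corrected): v_init has length `points`; v_corrected is an
        array of shape (points, iterations).
    """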
v0 = test_functional_fit.v0
v_upper = v0 * (1.+range_percent/100.)
v_lower = v0 * (1.-range_percent/100.)
v_init = np.linspace(v_lower,v_upper,points)
v_corrected = []
v = v_init.copy()
for i in range(iterations):
p = rvo.murnaghan_pressure(v, test_functional_fit)
delta_v = rvo.apply_rvo_murnaghan(p, ref_functional_fit)
v = v + delta_v
v_corrected.append(v)
return (v_init, np.array(v_corrected).T)
def plot_v_sensitivity(v_init,v_corrected, test_functional,ref_functional,
v0=False, filename=False):
"""
Plot showing convergence over iterations and range of initial volumes
Arguments:
v_init: 1D numpy array of volume values
v_corrected: list of 1D numpy arrays, corresponding to iterative
volume estimates
test_functional: String containing name of test functional
ref_functional: String containing name of reference functional
v0: Optionally provide actual minimum volume. X axis is rescaled to
show this.
filename: Target file for plot export. If not provided, graph is drawn
to screen
"""
if v0:
x = v_init/v0
y = v_corrected/v0
xlabel = "Initial volume / Optimum volume"
ylabel = "Corrected volume / Optimum volume"
else:
x = v_init
y = v_corrected
xlabel = "Initial volume / $\AA^3$"
ylabel = "Corrected volume / $\AA^3$"
plt.figure(figsize = (8 / 2.54, 8 / 2.54))
plt.plot(x, y)
axes = plt.gca()
for spine in axes.spines.values():
spine.set_linewidth(0.5)
y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
axes.yaxis.set_major_formatter(y_formatter)
plt.title("Corrected functional: {0}\n".format(test_functional) +
"Reference functional: {0}".format(ref_functional))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(range(1,len(v_corrected)+1), loc='best', title="Iterations")
plt.tight_layout()
if filename:
plt.savefig(filename)
else:
plt.show()
def plot_bandgaps(materials,do_plot=True,do_write=False,plot_filename=False,
compact=False):
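    """Read "<material> - <functional> - Direct Bandgaps.csv" files from
    data_dir for the B3LYP and HSE06 functionals and, when do_plot is True,
    plot each material's direct bandgap against the relative volume v/v0.
    The figure is saved to plot_filename if given, otherwise shown on screen.
    """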
data, kpoints = {}, {}
BandGapData = namedtuple("BandGapData","factor v eg kpoint")
markers = {'PbS':'^', 'PbTe':'>', 'ZnS':'o', 'ZnTe':'x'}
colours = {'PbS':'b', 'PbTe':'g', 'ZnS':'r', 'ZnTe':'k'}
functionals = ["B3LYP","HSE06"]
for functional in functionals:
data.update({functional: {}})
for material in materials:
with open(data_dir +
"{0} - {1} - Direct Bandgaps.csv".format(material,
functional),
"r") as f:
csv_reader = csv.reader(f)
# skip first line
next(csv_reader)
# First row
text_row = next(csv_reader)
row = [float(x) for x in text_row]
factor, v, eg = [row[0]], [row[1]], [row[2]]
kpoint = row[3:]
for text_row in csv_reader:
row = [float(x) for x in text_row]
factor.append(row[0])
v.append(row[1])
eg.append(row[2])
data[functional].update({material:BandGapData(factor,v,eg,kpoint)})
if do_plot:
if compact:
fig_dimensions = (8 / 2.54, 14 / 2.54)
else:
fig_dimensions = (17.2 / 2.54, 12 / 2.54)
plt.figure(figsize = fig_dimensions)
subplot_axes = []
collect_factors, collect_eg = [], [] # List all values to get ranges
for i, functional in enumerate(functionals):
            if compact:
                plt.subplot(len(functionals), 1, i + 1)
            else:
                plt.subplot(1, len(functionals), i + 1)
for material in materials:
# Scale factor is in each dimension; cube to obtain volume change
x = np.power(data[functional][material].factor, 3)
y = data[functional][material].eg
plt.plot(x,y, label=material,
color=colours[material], marker=markers[material], linestyle='none')
collect_factors += data[functional][material].factor
collect_eg += data[functional][material].eg
plt.xlabel(r"$v/v_0$",fontweight = 'bold')
plt.ylabel("Bandgap / eV",fontweight = 'bold')
plt.title(functional,fontweight = 'bold')
plt.legend(loc="best", frameon = False,
prop = {'size' : font_size}, ncol=2)
subplot_axes.append(plt.gca())
x_min, x_max = float(min(collect_factors))**3, float(max(collect_factors))**3
y_min, y_max = 0, math.ceil(float(max(collect_eg)))
for axes in subplot_axes:
axes.set_xlim((x_min, x_max))
axes.set_ylim((y_min, y_max))
for spine in axes.spines.values():
spine.set_linewidth(0.5)
plt.tight_layout()
if plot_filename:
plt.savefig(plot_filename, format='png',dpi=300)
else:
plt.show()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Simulate application of RVO method to a dataset of binary chalcogenides.")
parser.add_argument("data_directory", help="path to 'binary_chalcogenides' folder")
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
parser.add_argument("-p", "--plot",
choices=["eos","iterative_p","iterative_v",
"v_sensitivity","v_bandgap", "none", "all"],
nargs='*', default=["all"],
help="Output plots")
parser.add_argument("-w", "--write",
choices=["eos","rvo","iterative_p","v_bandgap",
"v_sensitivity","none","all"],
nargs='*', default=["all"],
help="Write CSV files")
parser.add_argument("-m", "--material",
choices=["PbS","PbTe","ZnS","ZnTe"],
nargs='*', default=False)
parser.add_argument("-c", "--compact", action="store_true",
help="Output compact versions of some plots")
args = parser.parse_args()
main(data_path=args.data_directory, verbosity=args.verbose, to_plot=args.plot, to_write=args.write,
compounds=args.material, compact=args.compact)
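    # Example invocation (paths and selections are placeholders):
    #   python rvo_stats.py ../data/binary_chalcogenides -p eos iterative_p -w eos -m PbS ZnTe -c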
| gpl-3.0 |
pkruskal/scikit-learn | sklearn/covariance/graph_lasso_.py | 126 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
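# A minimal usage sketch for alpha_max together with graph_lasso (defined
# below): alpha is scaled down from alpha_max so that some off-diagonal entries
# of the precision matrix survive. The data and the 0.5 factor are invented for
# illustration and the helper is not used elsewhere in this module.
def _example_alpha_max_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    emp_cov = empirical_covariance(X)
    alpha = 0.5 * alpha_max(emp_cov)
    covariance, precision = graph_lasso(emp_cov, alpha)
    return covariance, precision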
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
    # make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
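# A minimal usage sketch (illustrative, not part of the original module):
# assuming `X` is an (n_samples, n_features) data array, the function above can
# be driven directly from the empirical covariance, e.g.
#
#     emp_cov = empirical_covariance(X)
#     covariance, precision = graph_lasso(emp_cov, alpha=0.05)
#
# where `alpha=0.05` is an arbitrary illustrative value.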
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
        printed at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
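# Estimator-style usage sketch (illustrative): `GraphLasso(alpha=0.05).fit(X)`
# exposes the fitted matrices as `covariance_` and `precision_`; the alpha value
# here is arbitrary and purely for illustration.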
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
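# Note (descriptive): in the loop above each call to graph_lasso is warm-started
# with the covariance estimated at the previous, larger alpha, which is why
# `alphas` is expected in decreasing order.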
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
    One of the challenges faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
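# Usage sketch (illustrative): `GraphLassoCV(alphas=4, n_refinements=4).fit(X)`
# selects `alpha_` by cross-validation, stores the explored grid in `cv_alphas_`
# and the per-fold log-likelihoods in `grid_scores`, then refits on the full data.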
| bsd-3-clause |
AveRapina/DeepLearningTutorials | code/mlp.py | 39 | 14267 | """
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert a
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid) . One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
__docformat__ = 'restructuredtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from logistic_sgd import LogisticRegression, load_data
# start-snippet-1
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
        Typical hidden layer of an MLP: units are fully-connected and have a
        sigmoidal activation function. Weight matrix W is of shape (n_in, n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
        # `W` is initialized with `W_values`, which is uniformly sampled
        # from -sqrt(6./(n_in+n_out)) to sqrt(6./(n_in+n_out))
        # for the tanh activation function.
        # The output of `uniform` is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU.
        # Note : optimal initialization of weights is dependent on the
        # activation function used (among other things).
        # For example, results presented in [Xavier10] suggest that you
        # should use 4 times larger initial weights for sigmoid
        # compared to tanh.
        # We have no info for other functions, so we use the same as
        # tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
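# Construction sketch (illustrative, not part of the original tutorial): with
# `rng = numpy.random.RandomState(1234)` and a symbolic matrix `x = T.matrix('x')`,
# a 784-input, 500-unit tanh layer is built as
#
#     layer = HiddenLayer(rng=rng, input=x, n_in=784, n_out=500, activation=T.tanh)
#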
# start-snippet-2
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(
rng=rng,
input=input,
n_in=n_in,
n_out=n_hidden,
activation=T.tanh
)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out
)
# end-snippet-2 start-snippet-3
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
        # the parameters of the model are the parameters of the two layers it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
# end-snippet-3
# keep track of model input
self.input = input
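# Construction sketch (illustrative, mirroring `test_mlp` below): an MLP for
# rasterized 28x28 MNIST digits is built as
#
#     classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
#
# and trained by minimizing
#     classifier.negative_log_likelihood(y) + L1_reg * classifier.L1
#     + L2_reg * classifier.L2_sqr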
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(
rng=rng,
input=x,
n_in=28 * 28,
n_hidden=n_hidden,
n_out=10
)
# start-snippet-4
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# end-snippet-4
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
# start-snippet-5
    # compute the gradient of cost with respect to theta (stored in params)
# the resulting gradients will be stored in a list gparams
gparams = [T.grad(cost, param) for param in classifier.params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
# given two lists of the same length, A = [a1, a2, a3, a4] and
# B = [b1, b2, b3, b4], zip generates a list C of same size, where each
# element is a pair formed from the two lists :
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
# compiling a Theano function `train_model` that returns the cost, but
    # at the same time updates the parameters of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-5
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
    patience = 10000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
                                  # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == '__main__':
test_mlp()
| bsd-3-clause |
nightjean/Deep-Learning | tensorflow/examples/learn/resnet.py | 35 | 5962 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example builds deep residual network for mnist data.
Reference Paper: http://arxiv.org/pdf/1512.03385.pdf
Note that this is still a work-in-progress. Feel free to submit a PR
to make this better.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from math import sqrt
import os
import tensorflow as tf
batch_norm = tf.contrib.layers.batch_norm
convolution2d = tf.contrib.layers.convolution2d
def res_net(x, y, activation=tf.nn.relu):
"""Builds a residual network.
Note that if the input tensor is 2D, it must be square in order to be
converted to a 4D tensor.
Borrowed structure from:
github.com/pkmital/tensorflow_tutorials/blob/master/10_residual_network.py
Args:
x: Input of the network
y: Output of the network
activation: Activation function to apply after each convolution
Returns:
Predictions and loss tensors.
"""
# Configurations for each bottleneck group.
BottleneckGroup = namedtuple('BottleneckGroup',
['num_blocks', 'num_filters', 'bottleneck_size'])
groups = [
BottleneckGroup(3, 128, 32), BottleneckGroup(3, 256, 64),
BottleneckGroup(3, 512, 128), BottleneckGroup(3, 1024, 256)
]
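  # e.g. the first group stacks 3 bottleneck blocks with 128 output filters and
  # a 32-filter bottleneck inside each block.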
input_shape = x.get_shape().as_list()
# Reshape the input into the right shape if it's 2D tensor
if len(input_shape) == 2:
ndim = int(sqrt(input_shape[1]))
x = tf.reshape(x, [-1, ndim, ndim, 1])
# First convolution expands to 64 channels
with tf.variable_scope('conv_layer1'):
net = convolution2d(
x, 64, 7, normalizer_fn=batch_norm, activation_fn=activation)
# Max pool
net = tf.nn.max_pool(net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# First chain of resnets
with tf.variable_scope('conv_layer2'):
net = convolution2d(net, groups[0].num_filters, 1, padding='VALID')
# Create the bottleneck groups, each of which contains `num_blocks`
# bottleneck groups.
for group_i, group in enumerate(groups):
for block_i in range(group.num_blocks):
name = 'group_%d/block_%d' % (group_i, block_i)
# 1x1 convolution responsible for reducing dimension
with tf.variable_scope(name + '/conv_in'):
conv = convolution2d(
net,
group.bottleneck_size,
1,
padding='VALID',
activation_fn=activation,
normalizer_fn=batch_norm)
with tf.variable_scope(name + '/conv_bottleneck'):
conv = convolution2d(
conv,
group.bottleneck_size,
3,
padding='SAME',
activation_fn=activation,
normalizer_fn=batch_norm)
# 1x1 convolution responsible for restoring dimension
with tf.variable_scope(name + '/conv_out'):
input_dim = net.get_shape()[-1].value
conv = convolution2d(
conv,
input_dim,
1,
padding='VALID',
activation_fn=activation,
normalizer_fn=batch_norm)
# shortcut connections that turn the network into its counterpart
# residual function (identity shortcut)
net = conv + net
try:
# upscale to the next group size
next_group = groups[group_i + 1]
with tf.variable_scope('block_%d/conv_upscale' % group_i):
net = convolution2d(
net,
next_group.num_filters,
1,
activation_fn=None,
biases_initializer=None,
padding='SAME')
except IndexError:
pass
net_shape = net.get_shape().as_list()
net = tf.nn.avg_pool(
net,
ksize=[1, net_shape[1], net_shape[2], 1],
strides=[1, 1, 1, 1],
padding='VALID')
net_shape = net.get_shape().as_list()
net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])
target = tf.one_hot(y, depth=10, dtype=tf.float32)
logits = tf.contrib.layers.fully_connected(net, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
  return tf.nn.softmax(logits), loss
def res_net_model(x, y):
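  """Model function wrapping `res_net` for use with tf.contrib.learn.Estimator;
  returns a dict of prediction tensors, the loss tensor and an Adagrad train op."""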
prediction, loss = res_net(x, y)
predicted = tf.argmax(prediction, 1)
accuracy = tf.equal(predicted, tf.cast(y, tf.int64))
predictions = {'prob': prediction, 'class': predicted, 'accuracy': accuracy}
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.001)
return predictions, loss, train_op
# Download and load MNIST data.
mnist = tf.contrib.learn.datasets.load_dataset('mnist')
# Create a new resnet classifier.
classifier = tf.contrib.learn.Estimator(model_fn=res_net_model)
tf.logging.set_verbosity(tf.logging.INFO) # Show training logs. (avoid silence)
# Train model and save summaries into logdir.
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=1000)
# Calculate accuracy.
result = classifier.evaluate(
x=mnist.test.images,
y=mnist.test.labels,
metrics={
'accuracy':
tf.contrib.learn.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='accuracy'),
})
score = result['accuracy']
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
4kula/EVA | docs/conf.py | 1 | 8667 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../eva")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'EVA'
copyright = u'2017, akula'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from eva import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'eva-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'EVA Documentation',
u'akula', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| gpl-3.0 |
dimkal/mne-python | mne/io/tests/test_pick.py | 2 | 6571 | from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_array_equal
import numpy as np
import os.path as op
from mne import (pick_channels_regexp, pick_types, Epochs,
read_forward_solution, rename_channels,
pick_info, pick_channels, __file__)
from mne.io.meas_info import create_info
from mne.io.array import RawArray
from mne.io.pick import (channel_indices_by_type, channel_type,
pick_types_forward, _picks_by_type)
from mne.io.constants import FIFF
from mne.io import Raw
from mne.datasets import testing
from mne.forward.tests import test_forward
from mne.utils import run_tests_if_main
def test_pick_channels_regexp():
"""Test pick with regular expression
"""
ch_names = ['MEG 2331', 'MEG 2332', 'MEG 2333']
assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0])
assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2])
assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2])
def test_pick_seeg():
"""Test picking with SEEG
"""
names = 'A1 A2 Fz O OTp1 OTp2 OTp3'.split()
types = 'mag mag eeg eeg seeg seeg seeg'.split()
info = create_info(names, 1024., types)
idx = channel_indices_by_type(info)
assert_array_equal(idx['mag'], [0, 1])
assert_array_equal(idx['eeg'], [2, 3])
assert_array_equal(idx['seeg'], [4, 5, 6])
assert_array_equal(pick_types(info, meg=False, seeg=True), [4, 5, 6])
for i, t in enumerate(types):
assert_equal(channel_type(info, i), types[i])
raw = RawArray(np.zeros((len(names), 10)), info)
events = np.array([[1, 0, 0], [2, 0, 0]])
epochs = Epochs(raw, events, {'event': 0}, -1e-5, 1e-5)
evoked = epochs.average(pick_types(epochs.info, meg=True, seeg=True))
e_seeg = evoked.pick_types(meg=False, seeg=True, copy=True)
for l, r in zip(e_seeg.ch_names, names[4:]):
assert_equal(l, r)
def _check_fwd_n_chan_consistent(fwd, n_expected):
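    """Check that a forward operator's info and solution have n_expected channels."""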
n_ok = len(fwd['info']['ch_names'])
n_sol = fwd['sol']['data'].shape[0]
assert_equal(n_expected, n_sol)
assert_equal(n_expected, n_ok)
@testing.requires_testing_data
def test_pick_forward_seeg():
"""Test picking forward with SEEG
"""
fwd = read_forward_solution(test_forward.fname_meeg)
counts = channel_indices_by_type(fwd['info'])
for key in counts.keys():
counts[key] = len(counts[key])
counts['meg'] = counts['mag'] + counts['grad']
fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
_check_fwd_n_chan_consistent(fwd_, counts['meg'])
fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
_check_fwd_n_chan_consistent(fwd_, counts['eeg'])
# should raise exception related to emptiness
assert_raises(ValueError, pick_types_forward, fwd, meg=False, eeg=False,
seeg=True)
# change last chan from EEG to sEEG
seeg_name = 'OTp1'
rename_channels(fwd['info'], {'EEG 060': seeg_name})
for ch in fwd['info']['chs']:
if ch['ch_name'] == seeg_name:
ch['kind'] = FIFF.FIFFV_SEEG_CH
ch['coil_type'] = FIFF.FIFFV_COIL_EEG
fwd['sol']['row_names'][-1] = fwd['info']['chs'][-1]['ch_name']
counts['eeg'] -= 1
counts['seeg'] += 1
# repick & check
fwd_seeg = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
assert_equal(fwd_seeg['sol']['row_names'], [seeg_name])
assert_equal(fwd_seeg['info']['ch_names'], [seeg_name])
# should work fine
fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
_check_fwd_n_chan_consistent(fwd_, counts['meg'])
fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
_check_fwd_n_chan_consistent(fwd_, counts['eeg'])
fwd_ = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
_check_fwd_n_chan_consistent(fwd_, counts['seeg'])
def test_picks_by_channels():
"""Test creating pick_lists"""
rng = np.random.RandomState(909)
test_data = rng.random_sample((4, 2000))
ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
ch_types = ['grad', 'mag', 'mag', 'eeg']
sfreq = 250.0
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = RawArray(test_data, info)
pick_list = _picks_by_type(raw.info)
assert_equal(len(pick_list), 3)
assert_equal(pick_list[0][0], 'mag')
pick_list2 = _picks_by_type(raw.info, meg_combined=False)
assert_equal(len(pick_list), len(pick_list2))
assert_equal(pick_list2[0][0], 'mag')
pick_list2 = _picks_by_type(raw.info, meg_combined=True)
assert_equal(len(pick_list), len(pick_list2) + 1)
assert_equal(pick_list2[0][0], 'meg')
test_data = rng.random_sample((4, 2000))
ch_names = ['MEG %03d' % i for i in [1, 2, 3, 4]]
ch_types = ['mag', 'mag', 'mag', 'mag']
sfreq = 250.0
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
raw = RawArray(test_data, info)
# Make sure checks for list input work.
assert_raises(ValueError, pick_channels, ch_names, 'MEG 001')
assert_raises(ValueError, pick_channels, ch_names, ['MEG 001'], 'hi')
pick_list = _picks_by_type(raw.info)
assert_equal(len(pick_list), 1)
assert_equal(pick_list[0][0], 'mag')
pick_list2 = _picks_by_type(raw.info, meg_combined=True)
assert_equal(len(pick_list), len(pick_list2))
assert_equal(pick_list2[0][0], 'mag')
def test_clean_info_bads():
"""Test cleaning info['bads'] when bad_channels are excluded """
raw_file = op.join(op.dirname(__file__), 'io', 'tests', 'data',
'test_raw.fif')
raw = Raw(raw_file)
# select eeg channels
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# select 3 eeg channels as bads
idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch]
# select meg channels
picks_meg = pick_types(raw.info, meg=True, eeg=False)
# select randomly 3 meg channels as bads
idx_meg_bad_ch = picks_meg[[0, 15, 34]]
meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch]
# simulate the bad channels
raw.info['bads'] = eeg_bad_ch + meg_bad_ch
# simulate the call to pick_info excluding the bad eeg channels
info_eeg = pick_info(raw.info, picks_eeg)
# simulate the call to pick_info excluding the bad meg channels
info_meg = pick_info(raw.info, picks_meg)
assert_equal(info_eeg['bads'], eeg_bad_ch)
assert_equal(info_meg['bads'], meg_bad_ch)
run_tests_if_main()
| bsd-3-clause |
nickgentoo/scikit-learn-graph | skgraph/kernel/WLOrthogonalizedGraphKernel.py | 1 | 16012 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 12:04:44 2015
Copyright 2015 Nicolo' Navarin
This file is part of scikit-learn-graph.
scikit-learn-graph is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
scikit-learn-graph is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with scikit-learn-graph. If not, see <http://www.gnu.org/licenses/>.
The code is from the following source.
Weisfeiler_Lehman graph kernel.
Python implementation of Nino Shervashidze Matlab code at:
http://mlcb.is.tuebingen.mpg.de/Mitarbeiter/Nino/Graphkernels/
Author : Sandro Vega Pons
License:
"""
import numpy as np
import networkx as nx
import copy
import math
from KernelTools import convert_to_sparse_matrix
from graphKernel import GraphKernel
from scipy.sparse import dok_matrix
from sklearn.preprocessing import normalize
class WLOrthogonalizedGraphKernel(GraphKernel):
"""
Weisfeiler_Lehman graph kernel.
"""
    def __init__(self, r=1, normalization=False):
self.h=r
self.normalization=normalization
self.__startsymbol='!' #special symbols used in encoding
self.__conjsymbol='#'
self.__endsymbol='?'
self.__fsfeatsymbol='*'
self.__version=0
self.__contextsymbol='@'
def kernelFunction(self, g_1, g_2):
"""Compute the kernel value (similarity) between two graphs.
Parameters
----------
g1 : networkx.Graph
First graph.
g2 : networkx.Graph
Second graph.
h : interger
Number of iterations.
nl : boolean
Whether to use original node labels. True for using node labels
saved in the attribute 'node_label'. False for using the node
degree of each node as node attribute.
Returns
-------
k : The similarity value between g1 and g2.
"""
gl = [g_1, g_2]
return self.computeGram(gl)[0, 1]
def transform(self, graph_list):
"""
        Compute the explicit Weisfeiler-Lehman feature maps of a list of graphs:
        one sparse matrix per iteration 0..h, where row i counts the compressed
        node labels of graph i at that iteration (rows are L2-normalized when
        self.normalization is True).
"""
n = len(graph_list) #number of graphs
phi = [{} for i in range(self.h+1)]
#phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
NodeIdToLabelId = [0] * n # NodeIdToLabelId[i][j] is labelid of node j in graph i
label_lookup = {} #map from features to corresponding id
label_counter = 0 #incremental value for label ids
for i in range(n): #for each graph
NodeIdToLabelId[i] = {}
for j in graph_list[i].nodes(): #for each node
if not label_lookup.has_key(graph_list[i].node[j]['label']):#update label_lookup and label ids from first iteration that consider node's labels
label_lookup[graph_list[i].node[j]['label']] = label_counter
NodeIdToLabelId[i][j] = label_counter
label_counter += 1
else:
NodeIdToLabelId[i][j] = label_lookup[graph_list[i].node[j]['label']]
feature=self.__fsfeatsymbol+str(label_lookup[graph_list[i].node[j]['label']])
if not phi[0].has_key((i,feature)):
phi[0][(i,feature)]=0.0
phi[0][(i,feature)]+=1.0
### MAIN LOOP
        # TODO: generate a vector for each iteration value
it = 1
        NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId) #labels id of next iteration
while it <= self.h: #each iteration compute the next labellings (that are contexts of the previous)
label_lookup = {}
for i in range(n): #for each graph
for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
neighbors=[]
for u in graph_list[i].neighbors(j):
neighbors.append(NodeIdToLabelId[i][u])
neighbors.sort() #sorting neighbours
long_label_string=str(NodeIdToLabelId[i][j])+self.__startsymbol #compute new labels id
for u in neighbors:
long_label_string+=str(u)+self.__conjsymbol
long_label_string=long_label_string[:-1]+self.__endsymbol
if not label_lookup.has_key(long_label_string):
label_lookup[long_label_string] = label_counter
NewNodeIdToLabelId[i][j] = label_counter
label_counter += 1
else:
NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
feature=self.__fsfeatsymbol+str(NewNodeIdToLabelId[i][j])
if not phi[it].has_key((i,feature)):
phi[it][(i,feature)]=0.0
phi[it][(i,feature)]+=1.0
NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId) #update current labels id
it = it + 1
ve=map(convert_to_sparse_matrix,phi)
#ve=convert_to_sparse_matrix(phi)
if self.normalization:
for i in range(len(ve)):
ve[i] = normalize(ve[i], norm='l2', axis=1)
#print type(ve)
return ve
# def transform(self, graph_list):
# """
# TODO
# """
# n = len(graph_list) #number of graphs
#
# phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
#
# NodeIdToLabelId = [dict() for x in range(n)] # NodeIdToLabelId[i][j] is labelid of node j in graph i
# label_lookup = {} #map from features to corresponding id
# label_counter = long(1) #incremental value for label ids
#
# for i in range(n): #for each graph
# #NodeIdToLabelId[i] = {}
# #nx.draw(graph_list[i])
#
#
# for j in graph_list[i].nodes(): #for each node
# if not label_lookup.has_key(graph_list[i].node[j]['label']):#update label_lookup and label ids from first iteration that consider node's labels
# label_lookup[graph_list[i].node[j]['label']] = label_counter
# NodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NodeIdToLabelId[i][j] = label_lookup[graph_list[i].node[j]['label']]
#
# feature=self.__fsfeatsymbol+str(label_lookup[graph_list[i].node[j]['label']])
# if not phi.has_key((i,feature)):
# phi[(i,feature)]=0.0
# phi[(i,feature)]+=1.0
#
# ### MAIN LOOP
# it = 0
# NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId) #labels id of nex iteration
#
# while it < self.h: #each iteration compute the next labellings (that are contexts of the previous)
# label_lookup = {}
#
# for i in range(n): #for each graph
# for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
# neighbors=[]
# for u in graph_list[i].neighbors(j):
# neighbors.append(NodeIdToLabelId[i][u])
# neighbors.sort() #sorting neighbours
#
# long_label_string=str(NodeIdToLabelId[i][j])+self.__startsymbol #compute new labels id
# for u in neighbors:
# long_label_string+=str(u)+self.__conjsymbol
# long_label_string=long_label_string[:-1]+self.__endsymbol
#
# if not label_lookup.has_key(long_label_string):
# label_lookup[long_label_string] = label_counter
# NewNodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
#
# feature=self.__fsfeatsymbol+str(NewNodeIdToLabelId[i][j])
# if not phi.has_key((i,feature)):
# phi[(i,feature)]=0.0
# phi[(i,feature)]+=1.0
#
#
# NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId) #update current labels id
# it = it + 1
# #print phi
# return convert_to_sparse_matrix(phi)
# def transform(self, graph_list):
# """
# TODO
# """
# n = len(graph_list) #number of graphs
#
# phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
# #phi=dok_matrix()
# NodeIdToLabelId = [0] * n # NodeIdToLabelId[i][j] is labelid of node j in graph i
# label_lookup = {} #map from features to corresponding id
# label_counter = 0 #incremental value for label ids
#
# for i in xrange(n): #for each graph
# NodeIdToLabelId[i] = {}
#
# for j in graph_list[i].nodes():
# enc=graph_list[i].node[j]['label'] #"0"+
# if enc not in label_lookup:#update label_lookup and label ids
# label_lookup[enc] = label_counter
# NodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NodeIdToLabelId[i][j] = label_lookup[enc]
# #print enc, label_lookup[enc]
# if (i,label_lookup[enc]) not in phi:
# phi[i,label_lookup[enc]]=0
# phi[i,label_lookup[enc]]+=1
#
# ### MAIN LOOP
# it = 0
# NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId)
# #label_lookup = {}
#
# while it < self.h:
# label_lookup = {}
#
# for i in xrange(n): #for each graph
# for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
# neighbors=[]
# for u in graph_list[i].neighbors(j):
# #print u,
# neighbors.append(NodeIdToLabelId[i][u])
# neighbors.sort()
# #print
# long_label_string=str(NodeIdToLabelId[i][j])#str(it+1)+self.__startsymbol+
# for u in neighbors:
# long_label_string+=self.__conjsymbol+str(u)
# #long_label_string=long_label_string[:-1]+self.__endsymbol
# if long_label_string not in label_lookup:
# label_lookup[long_label_string] = label_counter
# NewNodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
# print long_label_string, NewNodeIdToLabelId[i][j]
#
# if (i,NewNodeIdToLabelId[i][j]) not in phi:
# phi[i,NewNodeIdToLabelId[i][j]]=0
# phi[i,NewNodeIdToLabelId[i][j]]+=1
#
# NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId)
# it = it + 1
# #return dok_matrix(phi.todense()).tocsr()
# return convert_to_sparse_matrix(phi)
# def transform(self, graph_list):
# """
# TODO
# """
# n = len(graph_list) #number of graphs
#
# phi={} #dictionary representing the phi vector for each graph. phi[r][c]=v each row is a graph. each column is a feature
#
# NodeIdToLabelId = [0] * n # NodeIdToLabelId[i][j] is labelid of node j in graph i
# label_lookup = {} #map from features to corresponding id
# label_counter = 1 #incremental value for label ids
#
# for i in range(n): #for each graph
# NodeIdToLabelId[i] = {}
#
# for j in graph_list[i].nodes():
# #print graph_list[i].node[j]['label']
# if not label_lookup.has_key("0|"+str(graph_list[i].node[j]['label'])):#update label_lookup and label ids
# label_lookup["0|"+str(graph_list[i].node[j]['label'])] = label_counter
# NodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NodeIdToLabelId[i][j] = label_lookup["0|"+str(graph_list[i].node[j]['label'])]
#
# if not phi.has_key((i,label_lookup["0|"+str(graph_list[i].node[j]['label'])])):
# phi[(i,label_lookup["0|"+str(graph_list[i].node[j]['label'])])]=0
# phi[(i,label_lookup["0|"+str(graph_list[i].node[j]['label'])])]+=1
#
# ### MAIN LOOP
# it = 0
# NewNodeIdToLabelId = copy.deepcopy(NodeIdToLabelId)
# #NewNodeIdToLabelId =[0] * n
# while it < self.h:
# label_lookup = {}
#
# for i in range(n): #for each graph
# for j in graph_list[i].nodes(): #for each node, consider its neighbourhood
# neighbors=[]
# for u in graph_list[i].neighbors(j):
# #print u
# neighbors.append(NodeIdToLabelId[i][u])
# neighbors.sort()
# if len(neighbors)==0:
# print "Empty neighbors"
# #MODIFICATO RISPETTO a TESSELLI str(it)+self.__startsymbol+
# long_label_string=str(it+1)+"|"+str(NodeIdToLabelId[i][j])+self.__startsymbol
# for u in neighbors:
# long_label_string+=str(u)+self.__conjsymbol
# #long_label_string=long_label_string[:-1]+self.__endsymbol
# long_label_string=long_label_string[:-1]+self.__endsymbol
#
# if len(neighbors)==0:
# print long_label_string
#
# if not label_lookup.has_key(long_label_string):
# label_lookup[long_label_string] = label_counter
# NewNodeIdToLabelId[i][j] = label_counter
# label_counter += 1
# else:
# NewNodeIdToLabelId[i][j] = label_lookup[long_label_string]
#
# if not phi.has_key((i,NewNodeIdToLabelId[i][j])):
# phi[(i,NewNodeIdToLabelId[i][j])]=0
# phi[(i,NewNodeIdToLabelId[i][j])]+=1
#
# NodeIdToLabelId = copy.deepcopy(NewNodeIdToLabelId)
# it = it + 1
# return convert_to_sparse_matrix(phi)
# def __normalization(self, gram):
# """
# TODO
# """
# if self.normalization:
# diagonal=np.diag(gram)
# a=np.tile(diagonal,(gram.shape[0],1))
# b=diagonal.reshape((gram.shape[0],1))
# b=np.tile(b,(1,gram.shape[1]))
#
# return gram/np.sqrt(a*b)
# else :
# return gram
def computeKernelMatrixTrain(self,Graphs):
return self.computeGram(Graphs)
def computeGram(self,g_it,precomputed=None):
if precomputed is None:
precomputed=self.transform(g_it)
return precomputed.dot(precomputed.T).todense().tolist()
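# --- Editorial usage sketch (comments only, not part of the original source) ---
# The active methods above expose the kernel through ``transform`` (a sparse
# feature map, one row per graph, built by the Weisfeiler-Lehman-style relabelling
# sketched in the commented-out code) and ``computeGram``/``computeKernelMatrixTrain``.
# Assuming ``kernel`` is an instance of the enclosing class (its name is not visible
# in this excerpt) and ``graphs`` is a list of networkx graphs whose nodes carry a
# 'label' attribute, a typical call sequence could be:
#
#   phi = kernel.transform(graphs)                        # explicit feature map
#   gram = kernel.computeGram(graphs, phi)                # reuse the precomputed features
#   gram_train = kernel.computeKernelMatrixTrain(graphs)  # convenience wrapper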
| gpl-3.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/datasets/synthetic.py | 40 | 7451 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synthetic dataset generators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets.base import Dataset
from tensorflow.python.util.deprecation import deprecated
@deprecated(None, 'Consider using synthetic datasets from scikits.learn.')
def circles(n_samples=100,
noise=None,
seed=None,
factor=0.8,
n_classes=2,
*args,
**kwargs):
"""Create circles separated by some value
Args:
n_samples: int, number of datapoints to generate
noise: float or None, standard deviation of the Gaussian noise added
seed: int or None, seed for the noise
factor: float, size factor of the inner circles with respect to the outer
ones
n_classes: int, number of classes to generate
Returns:
Shuffled features and labels for 'circles' synthetic dataset of type
`base.Dataset`
Note:
The multi-class support might not work as expected if `noise` is enabled
TODO:
- Generation of unbalanced data
Credit goes to (under BSD 3 clause):
B. Thirion,
G. Varoquaux,
A. Gramfort,
V. Michel,
O. Grisel,
G. Louppe,
J. Nothman
"""
if seed is not None:
np.random.seed(seed)
  # Algo: 1) Generate initial circle, 2) For every class generate a smaller radius circle
linspace = np.linspace(0, 2 * np.pi, n_samples // n_classes)
circ_x = np.empty(0, dtype=np.int32)
circ_y = np.empty(0, dtype=np.int32)
base_cos = np.cos(linspace)
base_sin = np.sin(linspace)
y = np.empty(0, dtype=np.int32)
for label in range(n_classes):
circ_x = np.append(circ_x, base_cos)
circ_y = np.append(circ_y, base_sin)
base_cos *= factor
base_sin *= factor
y = np.append(y, label * np.ones(n_samples // n_classes, dtype=np.int32))
# Add more points if n_samples is not divisible by n_classes (unbalanced!)
extras = n_samples % n_classes
circ_x = np.append(circ_x, np.cos(np.random.rand(extras) * 2 * np.pi))
circ_y = np.append(circ_y, np.sin(np.random.rand(extras) * 2 * np.pi))
y = np.append(y, np.zeros(extras, dtype=np.int32))
# Reshape the features/labels
X = np.vstack((circ_x, circ_y)).T
y = np.hstack(y)
# Shuffle the data
indices = np.random.permutation(range(n_samples))
if noise is not None:
X += np.random.normal(scale=noise, size=X.shape)
return Dataset(data=X[indices], target=y[indices])
@deprecated(None, 'Consider using synthetic datasets from scikits.learn.')
def spirals(n_samples=100,
noise=None,
seed=None,
mode='archimedes',
n_loops=2,
*args,
**kwargs):
"""Create spirals
Currently only binary classification is supported for spiral generation
Args:
n_samples: int, number of datapoints to generate
noise: float or None, standard deviation of the Gaussian noise added
seed: int or None, seed for the noise
n_loops: int, number of spiral loops, doesn't play well with 'bernoulli'
mode: str, how the spiral should be generated. Current implementations:
'archimedes': a spiral with equal distances between branches
'bernoulli': logarithmic spiral with branch distances increasing
'fermat': a spiral with branch distances decreasing (sqrt)
Returns:
Shuffled features and labels for 'spirals' synthetic dataset of type
`base.Dataset`
Raises:
ValueError: If the generation `mode` is not valid
TODO:
- Generation of unbalanced data
"""
n_classes = 2 # I am not sure how to make it multiclass
_modes = {
'archimedes': _archimedes_spiral,
'bernoulli': _bernoulli_spiral,
'fermat': _fermat_spiral
}
if mode is None or mode not in _modes:
raise ValueError('Cannot generate spiral with mode %s' % mode)
if seed is not None:
np.random.seed(seed)
linspace = np.linspace(0, 2 * n_loops * np.pi, n_samples // n_classes)
spir_x = np.empty(0, dtype=np.int32)
spir_y = np.empty(0, dtype=np.int32)
y = np.empty(0, dtype=np.int32)
for label in range(n_classes):
base_cos, base_sin = _modes[mode](linspace, label * np.pi, *args, **kwargs)
spir_x = np.append(spir_x, base_cos)
spir_y = np.append(spir_y, base_sin)
y = np.append(y, label * np.ones(n_samples // n_classes, dtype=np.int32))
# Add more points if n_samples is not divisible by n_classes (unbalanced!)
extras = n_samples % n_classes
if extras > 0:
x_extra, y_extra = _modes[mode](np.random.rand(extras) * 2 * np.pi, *args,
**kwargs)
spir_x = np.append(spir_x, x_extra)
spir_y = np.append(spir_y, y_extra)
y = np.append(y, np.zeros(extras, dtype=np.int32))
# Reshape the features/labels
X = np.vstack((spir_x, spir_y)).T
y = np.hstack(y)
# Shuffle the data
indices = np.random.permutation(range(n_samples))
if noise is not None:
X += np.random.normal(scale=noise, size=X.shape)
return Dataset(data=X[indices], target=y[indices])
def _archimedes_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Archimedes spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = theta * np.cos(theta + theta_offset), theta * np.sin(
theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Equiangular (Bernoulli's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
Kwargs:
exp_scale: growth rate of the exponential
"""
exp_scale = kwargs.pop('exp_scale', 0.1)
x, y = np.exp(exp_scale * theta) * np.cos(theta + theta_offset), np.exp(
exp_scale * theta) * np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
def _fermat_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Parabolic (Fermat's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
"""
x, y = np.sqrt(theta) * np.cos(theta + theta_offset), np.sqrt(theta) * np.sin(
theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
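# --- Editorial usage sketch (not part of the original module) ---
# Both generators return a `Dataset` namedtuple with `data` and `target` attributes.
# The guard below only runs when this file is executed directly.
if __name__ == '__main__':
  _circles_demo = circles(n_samples=200, noise=0.05, seed=0, n_classes=2)
  _spirals_demo = spirals(n_samples=200, noise=0.05, seed=0, mode='archimedes')
  print(_circles_demo.data.shape, _circles_demo.target.shape)
  print(_spirals_demo.data.shape, _spirals_demo.target.shape)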
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pycparser/ply/ctokens.py | 206 | 3177 | # ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'INCREMENT', 'DECREMENT',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
    # Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
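# --- Editorial note (comments only, not part of the original file) ---
# These specifications are meant to be imported by a concrete ply lexer module,
# which still has to supply the pieces ply itself requires (an ignore set and an
# error handler). A hedged sketch of such a consumer module:
#
#   import ply.lex as lex
#   from ctokens import *            # brings in ``tokens`` and the ``t_...`` rules
#
#   t_ignore = ' \t'                 # skip spaces and tabs
#
#   def t_error(t):                  # minimal error handler
#       print("Illegal character %r" % t.value[0])
#       t.lexer.skip(1)
#
#   lexer = lex.lex()                # build the lexer from this module's namespace
#   lexer.input("x += 42; /* done */")
#   for tok in lexer:
#       print(tok.type, tok.value)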
| mit |
jamesrobertlloyd/automl-phase-2 | sandpit.py | 1 | 4567 | __author__ = 'James Robert Lloyd'
__description__ = 'Scraps of code before module structure becomes apparent'
from util import callback_1d
import pybo
from pybo.functions.functions import _cleanup, GOModel
import numpy as np
from sklearn.datasets import load_iris
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
import os
import sys
sys.path.append(os.path.dirname(__file__))
from multiprocessing import Pool
from sandpit_two import print_globals, import_and_print_globals
import global_data
@_cleanup
class Sinusoidal(GOModel):
"""
Simple sinusoidal function bounded in [0, 2pi] given by cos(x)+sin(3x).
"""
bounds = [[0, 2*np.pi]]
xmax = 3.61439678
@staticmethod
def _f(x):
return -np.ravel(np.cos(x) + np.sin(3*x))
@_cleanup
class CV_RF(GOModel):
"""
Cross validated random forest
"""
bounds = [[1, 25]]
xmax = 10 # FIXME - should this not be optional?
@staticmethod
def _f(x):
# iris = load_iris()
        X, y = make_hastie_10_2(random_state=0)
x = np.ravel(x)
f = np.zeros(x.shape)
for i in range(f.size):
clf = RandomForestClassifier(n_estimators=1, min_samples_leaf=int(np.round(x[i])), random_state=0)
# scores = cross_val_score(clf, iris.data, iris.target)
scores = cross_val_score(clf, X, y, cv=5)
f[i] = -scores.mean()
return f.ravel()
from multiprocessing import Process, Queue, Manager, Array
from Queue import Empty as q_Empty
import cPickle as pickle
import time
from agent import Agent, start_communication #, start_communication_debug
# class DummyAgent(AgentWithData):
# def __init__(self, name='Give me a name', cpu_budget=1, **kwargs):
# super(DummyAgent, self).__init__(**kwargs)
# self.name = name
# self.cpu_budget = cpu_budget
# self._value = 1
#
# def serialize(self, filename):
# del self.shared_array
# del self.nparray # we don't need to delete it for pickle to work, but it will be incorrect on unpickling
# with open(filename, 'wb') as pickle_file:
# pickle.dump(self, pickle_file)
# self.terminated = True
#
# def next_action(self):
# while len(self.inbox) > 0:
# message = self.inbox.pop(0)
# print(self.nparray)
# print('Received message : %s' % message)
# if message['subject'] == 'serialize':
# self.serialize(message['filename'])
def separate_process():
print('I am a separate process')
from learners import DummyLearner
# def multi_pickle_experiment():
# q = Queue()
# arr = Array('d', range(5))
# a = DummyLearner(shared_array=arr, inbox_q=q)
# p = Process(target=start_communication_debug, kwargs=dict(agent=a))
# del a
# p.start()
# arr[0] = 99
# q.put(dict(subject='A message'))
# time.sleep(2)
# raw_input('Press return to continue')
# arr[0] = 9
# q.put(dict(subject='Another message'))
# time.sleep(2)
# q.put(dict(subject='serialize', filename='temp/dill.pk'))
# time.sleep(2)
# raw_input('Press return to continue')
# p.join()
# print('Process has serialized itself')
# raw_input('Press return to revive')
# arr[0] = 999
# p = Process(target=start_communication_debug, kwargs=dict(pickle_filename='temp/dill.pk', shared_array=arr))
# p.start()
# q.put(dict(subject='A second message'))
# time.sleep(2)
# raw_input('Press return to kill')
# q.put(dict(subject='terminate'))
# p.join()
# print('Success')
# def print_globals(_):
# time.sleep(5)
# print(globals())
# X = my_global
# X = X + 1
# print(X)
# time.sleep(5)
def global_test():
raw_input('I begin')
# global my_global
global_data.my_global = np.full((2**17, 2 * 2**10), 42)
raw_input('Globals created')
processing_pool = Pool(10)
processing_pool.map(import_and_print_globals, [None] * 10)
processing_pool.close()
processing_pool.join()
raw_input('Multiprocessing complete')
if __name__ == '__main__':
# objective = CV_RF()
#
# info = pybo.solve_bayesopt(
# objective,
# objective.bounds,
# niter=25,
# noisefree=False,
# rng=0,
# init='uniform',
# callback=callback_1d)
#
# print('Finished')
#
# raw_input('Press enter to finish')
global_test()
| mit |
GoogleCloudPlatform/public-datasets-pipelines | datasets/bls/pipelines/c_cpi_u/c_cpi_u_dag.py | 2 | 3848 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="bls.c_cpi_u",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="transform_csv",
startup_timeout_seconds=600,
name="c_cpi_u",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.bls.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URLS": '["gs://pdp-feeds-staging/Bureau/inflat_listarea_area_join.csv","gs://pdp-feeds-staging/Bureau/cu.item.tsv"]',
"SOURCE_FILES": '["files/data1.csv","files/data2.tsv"]',
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/bls/c_cpi_u/data_output.csv",
"PIPELINE_NAME": "c_cpi_u",
"FILE_PATH": "files/",
"JOINING_KEY": "item_code",
"TRIM_SPACE": '["series_id","value","footnote_codes","item_code"]',
"CSV_HEADERS": '["series_id","year","period","value","footnote_codes","survey_abbreviation","seasonal_code","periodicity_code","area_code","area_name","item_code","item_name","date"]',
},
resources={
"request_memory": "2G",
"request_cpu": "1",
"request_ephemeral_storage": "10G",
},
)
# Task to load CSV data to a BigQuery table
load_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/bls/c_cpi_u/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="bls.c_cpi_u",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{"name": "series_id", "type": "STRING", "mode": "required"},
{"name": "year", "type": "INTEGER", "mode": "NULLABLE"},
{"name": "period", "type": "STRING", "mode": "NULLABLE"},
{"name": "value", "type": "FLOAT", "mode": "NULLABLE"},
{"name": "footnote_codes", "type": "STRING", "mode": "NULLABLE"},
{"name": "survey_abbreviation", "type": "STRING", "mode": "NULLABLE"},
{"name": "seasonal_code", "type": "STRING", "mode": "NULLABLE"},
{"name": "periodicity_code", "type": "STRING", "mode": "NULLABLE"},
{"name": "area_code", "type": "STRING", "mode": "NULLABLE"},
{"name": "area_name", "type": "STRING", "mode": "NULLABLE"},
{"name": "item_code", "type": "STRING", "mode": "NULLABLE"},
{"name": "item_name", "type": "STRING", "mode": "NULLABLE"},
{"name": "date", "type": "DATE", "mode": "NULLABLE"},
],
)
transform_csv >> load_to_bq
| apache-2.0 |
dimkal/mne-python | examples/decoding/plot_decoding_time_generalization.py | 5 | 1947 | """
==========================================================
Decoding sensor space data with Generalization Across Time
==========================================================
This example runs the analysis computed in:
Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
and Stanislas Dehaene, "Two distinct dynamic modes subtend the detection of
unexpected sounds", PLOS ONE, 2013,
http://www.ncbi.nlm.nih.gov/pubmed/24475052
The idea is to learn at one time instant and assess if the decoder
can predict accurately over time.
"""
# Authors: Jean-Remi King <jeanremi.king@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.datasets import spm_face
from mne.decoding import GeneralizationAcrossTime
print(__doc__)
# Preprocess data
data_path = spm_face.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = mne.io.Raw(raw_fname % 1, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 45, method='iir')
events = mne.find_events(raw, stim_channel='UPPT001')
event_id = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.1, 0.5
decim = 4 # decimate to make the example faster to run
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(mag=1.5e-12), decim=decim, verbose=False)
# Define decoder. The decision_function is employed to use AUC for scoring
gat = GeneralizationAcrossTime(predict_mode='cross-validation', n_jobs=2)
# fit and score
gat.fit(epochs)
gat.score(epochs)
gat.plot(vmin=0.1, vmax=0.9,
title="Generalization Across Time (faces vs. scrambled)")
gat.plot_diagonal() # plot decoding across time (correspond to GAT diagonal)
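# (Editorial note, not in the original example: after ``gat.score(epochs)`` the
# scores are also stored on the object, so the full training-time x testing-time
# matrix can be pulled out for custom plotting, e.g.:)
#
# import numpy as np
# scores_matrix = np.array(gat.scores_)  # shape: (n_train_times, n_test_times)
# print(scores_matrix.shape)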
| bsd-3-clause |
DinoCow/airflow | tests/providers/google/cloud/hooks/test_automl.py | 3 | 9839 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from google.cloud.automl_v1beta1 import AutoMlClient, PredictionServiceClient
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_no_default_project_id
CREDENTIALS = "test-creds"
CLIENT_INFO = "client-info"
TASK_ID = "test-automl-hook"
GCP_PROJECT_ID = "test-project"
GCP_LOCATION = "test-location"
MODEL_NAME = "test_model"
MODEL_ID = "projects/198907790164/locations/us-central1/models/TBL9195602771183665152"
DATASET_ID = "TBL123456789"
MODEL = {
"display_name": MODEL_NAME,
"dataset_id": DATASET_ID,
"tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
LOCATION_PATH = AutoMlClient.location_path(GCP_PROJECT_ID, GCP_LOCATION)
MODEL_PATH = PredictionServiceClient.model_path(GCP_PROJECT_ID, GCP_LOCATION, MODEL_ID)
DATASET_PATH = AutoMlClient.dataset_path(GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID)
INPUT_CONFIG = {"input": "value"}
OUTPUT_CONFIG = {"output": "value"}
PAYLOAD = {"test": "payload"}
DATASET = {"dataset_id": "data"}
MASK = {"field": "mask"}
class TestAutoMLHook(unittest.TestCase):
def setUp(self) -> None:
with mock.patch(
"airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudAutoMLHook()
self.hook._get_credentials = mock.MagicMock(return_value=CREDENTIALS) # type: ignore
@mock.patch(
"airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.client_info",
new_callable=lambda: CLIENT_INFO,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient")
def test_get_conn(self, mock_automl_client, mock_client_info):
self.hook.get_conn()
mock_automl_client.assert_called_once_with(credentials=CREDENTIALS, client_info=CLIENT_INFO)
@mock.patch(
"airflow.providers.google.cloud.hooks.automl.GoogleBaseHook.client_info",
new_callable=lambda: CLIENT_INFO,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient")
def test_prediction_client(self, mock_prediction_client, mock_client_info):
client = self.hook.prediction_client # pylint: disable=unused-variable # noqa
mock_prediction_client.assert_called_once_with(credentials=CREDENTIALS, client_info=CLIENT_INFO)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_model")
def test_create_model(self, mock_create_model):
self.hook.create_model(model=MODEL, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
mock_create_model.assert_called_once_with(
parent=LOCATION_PATH, model=MODEL, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.batch_predict")
def test_batch_predict(self, mock_batch_predict):
self.hook.batch_predict(
model_id=MODEL_ID,
location=GCP_LOCATION,
project_id=GCP_PROJECT_ID,
input_config=INPUT_CONFIG,
output_config=OUTPUT_CONFIG,
)
mock_batch_predict.assert_called_once_with(
name=MODEL_PATH,
input_config=INPUT_CONFIG,
output_config=OUTPUT_CONFIG,
params=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.PredictionServiceClient.predict")
def test_predict(self, mock_predict):
self.hook.predict(
model_id=MODEL_ID,
location=GCP_LOCATION,
project_id=GCP_PROJECT_ID,
payload=PAYLOAD,
)
mock_predict.assert_called_once_with(
name=MODEL_PATH,
payload=PAYLOAD,
params=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.create_dataset")
def test_create_dataset(self, mock_create_dataset):
self.hook.create_dataset(dataset=DATASET, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
mock_create_dataset.assert_called_once_with(
parent=LOCATION_PATH,
dataset=DATASET,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.import_data")
def test_import_dataset(self, mock_import_data):
self.hook.import_data(
dataset_id=DATASET_ID,
location=GCP_LOCATION,
project_id=GCP_PROJECT_ID,
input_config=INPUT_CONFIG,
)
mock_import_data.assert_called_once_with(
name=DATASET_PATH,
input_config=INPUT_CONFIG,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_column_specs")
def test_list_column_specs(self, mock_list_column_specs):
table_spec = "table_spec_id"
filter_ = "filter"
page_size = 42
self.hook.list_column_specs(
dataset_id=DATASET_ID,
table_spec_id=table_spec,
location=GCP_LOCATION,
project_id=GCP_PROJECT_ID,
field_mask=MASK,
filter_=filter_,
page_size=page_size,
)
parent = AutoMlClient.table_spec_path(GCP_PROJECT_ID, GCP_LOCATION, DATASET_ID, table_spec)
mock_list_column_specs.assert_called_once_with(
parent=parent,
field_mask=MASK,
filter_=filter_,
page_size=page_size,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.get_model")
def test_get_model(self, mock_get_model):
self.hook.get_model(model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
mock_get_model.assert_called_once_with(name=MODEL_PATH, retry=None, timeout=None, metadata=None)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_model")
def test_delete_model(self, mock_delete_model):
self.hook.delete_model(model_id=MODEL_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
mock_delete_model.assert_called_once_with(name=MODEL_PATH, retry=None, timeout=None, metadata=None)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.update_dataset")
def test_update_dataset(self, mock_update_dataset):
self.hook.update_dataset(
dataset=DATASET,
update_mask=MASK,
)
mock_update_dataset.assert_called_once_with(
dataset=DATASET, update_mask=MASK, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.deploy_model")
def test_deploy_model(self, mock_deploy_model):
image_detection_metadata = {}
self.hook.deploy_model(
model_id=MODEL_ID,
image_detection_metadata=image_detection_metadata,
location=GCP_LOCATION,
project_id=GCP_PROJECT_ID,
)
mock_deploy_model.assert_called_once_with(
name=MODEL_PATH,
retry=None,
timeout=None,
metadata=None,
image_object_detection_model_deployment_metadata=image_detection_metadata,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_table_specs")
def test_list_table_specs(self, mock_list_table_specs):
filter_ = "filter"
page_size = 42
self.hook.list_table_specs(
dataset_id=DATASET_ID,
location=GCP_LOCATION,
project_id=GCP_PROJECT_ID,
filter_=filter_,
page_size=page_size,
)
mock_list_table_specs.assert_called_once_with(
parent=DATASET_PATH,
filter_=filter_,
page_size=page_size,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.list_datasets")
def test_list_datasets(self, mock_list_datasets):
self.hook.list_datasets(location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
mock_list_datasets.assert_called_once_with(
parent=LOCATION_PATH, retry=None, timeout=None, metadata=None
)
@mock.patch("airflow.providers.google.cloud.hooks.automl.AutoMlClient.delete_dataset")
def test_delete_dataset(self, mock_delete_dataset):
self.hook.delete_dataset(dataset_id=DATASET_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)
mock_delete_dataset.assert_called_once_with(
name=DATASET_PATH, retry=None, timeout=None, metadata=None
)
| apache-2.0 |
midnightradio/gensim | docs/src/auto_examples/tutorials/run_doc2vec_lee.py | 8 | 16167 | r"""
Doc2Vec Model
=============
Introduces Gensim's Doc2Vec model and demonstrates its use on the
`Lee Corpus <https://hekyll.services.adelaide.edu.au/dspace/bitstream/2440/28910/1/hdl_28910.pdf>`__.
"""
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
###############################################################################
# Doc2Vec is a :ref:`core_concepts_model` that represents each
# :ref:`core_concepts_document` as a :ref:`core_concepts_vector`. This
# tutorial introduces the model and demonstrates how to train and assess it.
#
# Here's a list of what we'll be doing:
#
# 0. Review the relevant models: bag-of-words, Word2Vec, Doc2Vec
# 1. Load and preprocess the training and test corpora (see :ref:`core_concepts_corpus`)
# 2. Train a Doc2Vec :ref:`core_concepts_model` model using the training corpus
# 3. Demonstrate how the trained model can be used to infer a :ref:`core_concepts_vector`
# 4. Assess the model
# 5. Test the model on the test corpus
#
# Review: Bag-of-words
# --------------------
#
# .. Note:: Feel free to skip these review sections if you're already familiar with the models.
#
# You may be familiar with the `bag-of-words model
# <https://en.wikipedia.org/wiki/Bag-of-words_model>`_ from the
# :ref:`core_concepts_vector` section.
# This model transforms each document to a fixed-length vector of integers.
# For example, given the sentences:
#
# - ``John likes to watch movies. Mary likes movies too.``
# - ``John also likes to watch football games. Mary hates football.``
#
# The model outputs the vectors:
#
# - ``[1, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0]``
# - ``[1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 1]``
#
# Each vector has 11 elements, where each element counts the number of times a
# particular word occurred in the document.
# The order of elements is arbitrary.
# In the example above, the order of the elements corresponds to the words:
# ``["John", "likes", "to", "watch", "movies", "Mary", "too", "also", "football", "games", "hates"]``.
#
# Bag-of-words models are surprisingly effective, but have several weaknesses.
#
# First, they lose all information about word order: "John likes Mary" and
# "Mary likes John" correspond to identical vectors. There is a solution: bag
# of `n-grams <https://en.wikipedia.org/wiki/N-gram>`__
# models consider word phrases of length n to represent documents as
# fixed-length vectors to capture local word order but suffer from data
# sparsity and high dimensionality.
#
# Second, the model does not attempt to learn the meaning of the underlying
# words, and as a consequence, the distance between vectors doesn't always
# reflect the difference in meaning. The ``Word2Vec`` model addresses this
# second problem.
#
# Review: ``Word2Vec`` Model
# --------------------------
#
# ``Word2Vec`` is a more recent model that embeds words in a lower-dimensional
# vector space using a shallow neural network. The result is a set of
# word-vectors where vectors close together in vector space have similar
# meanings based on context, and word-vectors distant to each other have
# differing meanings. For example, ``strong`` and ``powerful`` would be close
# together and ``strong`` and ``Paris`` would be relatively far.
#
# Gensim's :py:class:`~gensim.models.word2vec.Word2Vec` class implements this model.
#
# With the ``Word2Vec`` model, we can calculate the vectors for each **word** in a document.
# But what if we want to calculate a vector for the **entire document**\ ?
# We could average the vectors for each word in the document - while this is quick and crude, it can often be useful.
# However, there is a better way...
#
# Introducing: Paragraph Vector
# -----------------------------
#
# .. Important:: In Gensim, we refer to the Paragraph Vector model as ``Doc2Vec``.
#
# Le and Mikolov in 2014 introduced the `Doc2Vec algorithm <https://cs.stanford.edu/~quocle/paragraph_vector.pdf>`__,
# which usually outperforms such simple-averaging of ``Word2Vec`` vectors.
#
# The basic idea is: act as if a document has another floating word-like
# vector, which contributes to all training predictions, and is updated like
# other word-vectors, but we will call it a doc-vector. Gensim's
# :py:class:`~gensim.models.doc2vec.Doc2Vec` class implements this algorithm.
#
# There are two implementations:
#
# 1. Paragraph Vector - Distributed Memory (PV-DM)
# 2. Paragraph Vector - Distributed Bag of Words (PV-DBOW)
#
# .. Important::
# Don't let the implementation details below scare you.
# They're advanced material: if it's too much, then move on to the next section.
#
# PV-DM is analogous to Word2Vec CBOW. The doc-vectors are obtained by training
# a neural network on the synthetic task of predicting a center word based on an
# average of both context word-vectors and the full document's doc-vector.
#
# PV-DBOW is analogous to Word2Vec SG. The doc-vectors are obtained by training
# a neural network on the synthetic task of predicting a target word just from
# the full document's doc-vector. (It is also common to combine this with
# skip-gram testing, using both the doc-vector and nearby word-vectors to
# predict a single target word, but only one at a time.)
#
# Prepare the Training and Test Data
# ----------------------------------
#
# For this tutorial, we'll be training our model using the `Lee Background
# Corpus
# <https://hekyll.services.adelaide.edu.au/dspace/bitstream/2440/28910/1/hdl_28910.pdf>`_
# included in gensim. This corpus contains 314 documents selected from the
# Australian Broadcasting Corporation’s news mail service, which provides text
# e-mails of headline stories and covers a number of broad topics.
#
# And we'll test our model by eye using the much shorter `Lee Corpus
# <https://hekyll.services.adelaide.edu.au/dspace/bitstream/2440/28910/1/hdl_28910.pdf>`_
# which contains 50 documents.
#
import os
import gensim
# Set file names for train and test data
test_data_dir = os.path.join(gensim.__path__[0], 'test', 'test_data')
lee_train_file = os.path.join(test_data_dir, 'lee_background.cor')
lee_test_file = os.path.join(test_data_dir, 'lee.cor')
###############################################################################
# Define a Function to Read and Preprocess Text
# ---------------------------------------------
#
# Below, we define a function to:
#
# - open the train/test file (with latin encoding)
# - read the file line-by-line
# - pre-process each line (tokenize text into individual words, remove punctuation, set to lowercase, etc)
#
# The file we're reading is a **corpus**.
# Each line of the file is a **document**.
#
# .. Important::
# To train the model, we'll need to associate a tag/number with each document
# of the training corpus. In our case, the tag is simply the zero-based line
# number.
#
import smart_open
def read_corpus(fname, tokens_only=False):
with smart_open.open(fname, encoding="iso-8859-1") as f:
for i, line in enumerate(f):
tokens = gensim.utils.simple_preprocess(line)
if tokens_only:
yield tokens
else:
# For training data, add tags
yield gensim.models.doc2vec.TaggedDocument(tokens, [i])
train_corpus = list(read_corpus(lee_train_file))
test_corpus = list(read_corpus(lee_test_file, tokens_only=True))
###############################################################################
# Let's take a look at the training corpus
#
print(train_corpus[:2])
###############################################################################
# And the testing corpus looks like this:
#
print(test_corpus[:2])
###############################################################################
# Notice that the testing corpus is just a list of lists and does not contain
# any tags.
#
###############################################################################
# Training the Model
# ------------------
#
# Now, we'll instantiate a Doc2Vec model with a vector size with 50 dimensions and
# iterating over the training corpus 40 times. We set the minimum word count to
# 2 in order to discard words with very few occurrences. (Without a variety of
# representative examples, retaining such infrequent words can often make a
# model worse!) Typical iteration counts in the published `Paragraph Vector paper <https://cs.stanford.edu/~quocle/paragraph_vector.pdf>`__
# results, using 10s-of-thousands to millions of docs, are 10-20. More
# iterations take more time and eventually reach a point of diminishing
# returns.
#
# However, this is a very very small dataset (300 documents) with shortish
# documents (a few hundred words). Adding training passes can sometimes help
# with such small datasets.
#
model = gensim.models.doc2vec.Doc2Vec(vector_size=50, min_count=2, epochs=40)
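# (Editorial aside, not in the original tutorial.) The constructor above relies on
# the default ``dm=1``, i.e. the PV-DM variant described earlier. A PV-DBOW
# counterpart, optionally with ``dbow_words=1`` to also train word-vectors in
# skip-gram fashion, would be configured like this:
#
# model_dbow = gensim.models.doc2vec.Doc2Vec(dm=0, dbow_words=1, vector_size=50, min_count=2, epochs=40)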
###############################################################################
# Build a vocabulary
model.build_vocab(train_corpus)
###############################################################################
# Essentially, the vocabulary is a list (accessible via
# ``model.wv.index_to_key``) of all of the unique words extracted from the training corpus.
# Additional attributes for each word are available using the ``model.wv.get_vecattr()`` method,
# For example, to see how many times ``penalty`` appeared in the training corpus:
#
print(f"Word 'penalty' appeared {model.wv.get_vecattr('penalty', 'count')} times in the training corpus.")
###############################################################################
# Next, train the model on the corpus.
# If optimized Gensim (with BLAS library) is being used, this should take no more than 3 seconds.
# If the BLAS library is not being used, this should take no more than 2
# minutes, so use optimized Gensim with BLAS if you value your time.
#
model.train(train_corpus, total_examples=model.corpus_count, epochs=model.epochs)
###############################################################################
# Now, we can use the trained model to infer a vector for any piece of text
# by passing a list of words to the ``model.infer_vector`` function. This
# vector can then be compared with other vectors via cosine similarity.
#
vector = model.infer_vector(['only', 'you', 'can', 'prevent', 'forest', 'fires'])
print(vector)
###############################################################################
# Note that ``infer_vector()`` does *not* take a string, but rather a list of
# string tokens, which should have already been tokenized the same way as the
# ``words`` property of original training document objects.
#
# Also note that because the underlying training/inference algorithms are an
# iterative approximation problem that makes use of internal randomization,
# repeated inferences of the same text will return slightly different vectors.
#
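# (Editorial illustration, not in the original tutorial: inferring the same tokens
# twice usually yields close, but not identical, vectors.)

tokens_example = ['only', 'you', 'can', 'prevent', 'forest', 'fires']
print(model.infer_vector(tokens_example)[:5])
print(model.infer_vector(tokens_example)[:5])  # typically close to, but not equal to, the line above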
###############################################################################
# Assessing the Model
# -------------------
#
# To assess our new model, we'll first infer new vectors for each document of
# the training corpus, compare the inferred vectors with the training corpus,
# and then return the rank of the document based on self-similarity.
# Basically, we're pretending as if the training corpus is some new unseen data
# and then seeing how they compare with the trained model. The expectation is
# that we've likely overfit our model (i.e., all of the ranks will be less than
# 2) and so we should be able to find similar documents very easily.
# Additionally, we'll keep track of the second ranks for a comparison of less
# similar documents.
#
ranks = []
second_ranks = []
for doc_id in range(len(train_corpus)):
inferred_vector = model.infer_vector(train_corpus[doc_id].words)
sims = model.dv.most_similar([inferred_vector], topn=len(model.dv))
rank = [docid for docid, sim in sims].index(doc_id)
ranks.append(rank)
second_ranks.append(sims[1])
###############################################################################
# Let's count how each document ranks with respect to the training corpus
#
# NB. Results vary between runs due to random seeding and very small corpus
import collections
counter = collections.Counter(ranks)
print(counter)
###############################################################################
# Basically, greater than 95% of the inferred documents are found to be most
# similar to itself and about 5% of the time it is mistakenly most similar to
# another document. Checking the inferred-vector against a
# training-vector is a sort of 'sanity check' as to whether the model is
# behaving in a usefully consistent manner, though not a real 'accuracy' value.
#
# This is great and not entirely surprising. We can take a look at an example:
#
print('Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('SECOND-MOST', 1), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
###############################################################################
# Notice above that the most similar document (usually the same text) has a
# similarity score approaching 1.0. However, the similarity score for the
# second-ranked documents should be significantly lower (assuming the documents
# are in fact different) and the reasoning becomes obvious when we examine the
# text itself.
#
# We can run the next cell repeatedly to see a sampling of other target-document
# comparisons.
#
# Pick a random document from the corpus and infer a vector from the model
import random
doc_id = random.randint(0, len(train_corpus) - 1)
# Compare and print the second-most-similar document
print('Train Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
sim_id = second_ranks[doc_id]
print('Similar Document {}: «{}»\n'.format(sim_id, ' '.join(train_corpus[sim_id[0]].words)))
###############################################################################
# Testing the Model
# -----------------
#
# Using the same approach above, we'll infer the vector for a randomly chosen
# test document, and compare the document to our model by eye.
#
# Pick a random document from the test corpus and infer a vector from the model
doc_id = random.randint(0, len(test_corpus) - 1)
inferred_vector = model.infer_vector(test_corpus[doc_id])
sims = model.dv.most_similar([inferred_vector], topn=len(model.dv))
# Compare and print the most/median/least similar documents from the train corpus
print('Test Document ({}): «{}»\n'.format(doc_id, ' '.join(test_corpus[doc_id])))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
###############################################################################
# Conclusion
# ----------
#
# Let's review what we've seen in this tutorial:
#
# 0. Review the relevant models: bag-of-words, Word2Vec, Doc2Vec
# 1. Load and preprocess the training and test corpora (see :ref:`core_concepts_corpus`)
# 2. Train a Doc2Vec :ref:`core_concepts_model` model using the training corpus
# 3. Demonstrate how the trained model can be used to infer a :ref:`core_concepts_vector`
# 4. Assess the model
# 5. Test the model on the test corpus
#
# That's it! Doc2Vec is a great way to explore relationships between documents.
#
# Additional Resources
# --------------------
#
# If you'd like to know more about the subject matter of this tutorial, check out the links below.
#
# * `Word2Vec Paper <https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf>`_
# * `Doc2Vec Paper <https://cs.stanford.edu/~quocle/paragraph_vector.pdf>`_
# * `Dr. Michael D. Lee's Website <http://faculty.sites.uci.edu/mdlee>`_
# * `Lee Corpus <http://faculty.sites.uci.edu/mdlee/similarity-data/>`__
# * `IMDB Doc2Vec Tutorial <doc2vec-IMDB.ipynb>`_
#
| gpl-3.0 |
cajohnst/Optimized_FX_Portfolio | Pull_Data.py | 1 | 12619 | ''' Pull all data to be used in Optimize_FX_Portfolio, RSI_sample, MACD_sample, and futures_vs_spot (eventually pull fred economic reports) '''
import settings as sv
import pandas as pd
import quandl as qdl
import numpy as np
from pandas import Series, DataFrame
import datetime
from datetime import timedelta, date
import rollover_google_sheet
import fxstreet_google_sheet
import RSI_sample
import fxstreet_scraper
import sklearn as sklearn
from sklearn import preprocessing
from sklearn.linear_model import Ridge
import StringIO
import csv
import import_spot_test
def main():
currency_list = get_currency_list()
currency_quandl_list = get_currency_quandl_list()
fed_list = get_fed_list()
# Calculate beginning date
beg_date = sv.end_date - timedelta(sv.num_days_regression)
#############################################################################################################################
# Import events data into pandas dataframe
econ_calendar_full = pd.DataFrame(fxstreet_google_sheet.pull_data(sv.num_days_regression))
# If no consensus value exists, use the previous value to fill its place.
econ_calendar_full.Consensus.fillna(econ_calendar_full.Previous, inplace = True)
# Create column 'Deviation' as the actual value on the data release subtracting the market expectation
econ_calendar_full['Deviation'] = econ_calendar_full['Actual'] - econ_calendar_full['Consensus']
# #Take today's Economic Events from FXstreet_scraper and format
econ_calendar_today = fxstreet_scraper.main()
econ_calendar_today = StringIO.StringIO(econ_calendar_today)
econ_calendar_today = pd.read_csv(econ_calendar_today, header=0, index_col= False)
econ_calendar_today['DateTime'] = pd.to_datetime(econ_calendar_today['DateTime'])
econ_calendar_today = econ_calendar_today.set_index('DateTime')
econ_calendar_today.index.names = ['DateTime']
econ_calendar_today.Consensus.fillna(econ_calendar_today.Previous, inplace = True)
econ_calendar_today.dropna(thresh= 5, inplace = True)
#Begin raw input of user predictions for data releases (fill 'Actual' column with predictions).
for index, row in econ_calendar_today.iterrows():
prediction = raw_input("Prediction for {0} in {1} given the market consensus is {2}.\n Your Prediction:".format(row['Name'], row['Country'], row['Consensus']))
econ_calendar_today.set_value(index, 'Actual', prediction)
# Append today's data to the full calendar of releases
econ_calendar_full = econ_calendar_full.append(econ_calendar_today)
#############################################################################################################################
#RSI_sample data
# For stochastic data, import daily highs and lows into dataframe from quandl
# Highs and Lows are unreliable before sv.stoch_date (and need to be continually monitored)
list_high = [high.replace('1', '2') for high in currency_quandl_list]
list_low = [low.replace('1', '3') for low in currency_quandl_list]
# Maximum of the 'rolling' periods
max_lag = max(sv.q, sv.nslow, sv.nfast, sv.nema, sv.ma_slow, sv.ma_fast, sv.n, sv.d)
#Pull data from quandl
currency_table = get_currency_data(currency_list, currency_quandl_list, sv.num_days_regression, sv.end_date , sv.auth_tok)
live_rates = import_spot_test.main()
currency_table = currency_table.append(live_rates)
#Get daily lows from quandl for stochastic oscillator
low_table = get_currency_data(currency_list, list_low, sv.num_days_regression, sv.end_date , sv.auth_tok)
#Get daily highs from quandl for stochastic oscillator
high_table = get_currency_data(currency_list, list_high, sv.num_days_regression, sv.end_date , sv.auth_tok)
# #Calculate RSI for all currency pairs in currency_table
RSI = RSI_sample.RSI_Calc(currency_table, sv.q)
#Calculate exponentially weighted moving averages and MACD
emaslow, emafast, macd = RSI_sample.get_MACD(currency_table, nslow= sv.nslow, nfast = sv.nfast)
ema9 = RSI_sample.moving_average(macd, sv.nema, type = 'exponential')
#Calculate stochastics
fast_stochastic, slow_stochastic = RSI_sample.get_stochastic(currency_table, low_table, high_table, sv.n, sv.d)
# #Calculate simple moving averages
ma_f = RSI_sample.moving_average(currency_table, sv.ma_fast, type='simple')
ma_s = RSI_sample.moving_average(currency_table, sv.ma_slow, type='simple')
# Drop all NaNs, format data so indexes will match when joined
RSI = RSI_sample.drop_rows(RSI, max_lag)
ma_f = RSI_sample.drop_rows(ma_f, max_lag)
ma_s = RSI_sample.drop_rows(ma_s, max_lag)
emaslow= RSI_sample.drop_rows(emaslow, max_lag)
emafast= RSI_sample.drop_rows(emafast, max_lag)
macd = RSI_sample.drop_rows(macd, max_lag)
ema9 = RSI_sample.drop_rows(ema9, max_lag)
fast_stochastic = RSI_sample.drop_rows(fast_stochastic, max_lag)
slow_stochastic = RSI_sample.drop_rows(slow_stochastic, max_lag)
currency_table = RSI_sample.drop_rows(currency_table, max_lag)
#################################################################################################################
#Create fundamentals, merge tables, perform ridge regression, output daily return predictions
# Convert price data to returns and delete NaNs
returns_table = currency_table.pct_change(periods= sv.shift).dropna()
returns_table.drop(returns_table.index[:1], inplace=True)
returns_table = 100 * sv.leverage * returns_table
fed_table = get_fed_data(fed_list, sv.num_days_regression, sv.end_date , sv.auth_tok)
# Specialize data for events! Pull all historical data from event calendar which matches name in econ data dictionary.
economic_data_dict = get_economic_data_dict()
all_fundamentals_table = query_past_economic_data(econ_calendar_today, econ_calendar_full, fed_table, economic_data_dict)
#Merge the calendar data with the columns of technicals
regression_table = merge_with_technicals(currency_list, returns_table, all_fundamentals_table, RSI, macd, fast_stochastic, beg_date, sv.stoch_date)
return regression_table
def query_past_economic_data(calendar_today, calendar_full, fed_table, economic_data_dict):
# Get all historical data from the full calendar which matches today's data and is found in the econ data dictionary.
for index, values in calendar_today.iterrows():
country = values['Country']
if country in economic_data_dict:
event_name = values['Name']
if event_name in economic_data_dict[country]:
pull_events = calendar_full[(calendar_full['Country'] == country) & (calendar_full['Name'] == event_name)]
fed_table = fed_table.join(pull_events['Deviation'], how = 'left', rsuffix = ' of {0}'.format(event_name))
fed_table = fed_table.fillna(value = 0)
else:
print ' *** {0} not a listed event for {1} in the Economic Dictionary ***'.format(event_name, country)
else:
print ' *** {0} is not a country in the Economic Dictionary ***'.format(country)
return fed_table
def get_currency_quandl_list():
currency_quandl_list = ['CURRFX/MXNUSD.1', 'CURRFX/USDCAD.1', 'CURRFX/NZDUSD.1', 'CURRFX/USDHKD.1', 'CURRFX/USDJPY.1', 'CURRFX/USDSGD.1', 'CURRFX/GBPUSD.1', 'CURRFX/USDZAR.1',
'CURRFX/AUDUSD.1', 'CURRFX/EURUSD.1']
return currency_quandl_list
def get_currency_list():
currency_list = ['USD/MXN', 'USD/CAD', 'NZD/USD', 'USD/HKD', 'USD/JPY', 'USD/SGD', 'GBP/USD', 'USD/ZAR', 'AUD/USD', 'EUR/USD']
return currency_list
def get_economic_data_dict():
# Dictonary keys are the country name
# Key values are tuples structured as ([list of eventnames from fxstreet], [list of eventnames from quandl])
economic_data_dict = {
'United States':
['Consumer Price Index (YoY)', 'Consumer Price Index Ex Food & Energy (YoY)', 'Nonfarm Payrolls', 'Reuters/Michigan Consumer Sentiment Index', 'Baker Hughes US Oil Rig Count', 'Durable Goods Orders', 'Durable Goods Orders ex Transportation', 'Retail Sales (MoM)', 'Initial Jobless Claims', 'ADP Employment Change', 'Gross Domestic Product Annualized', 'Unemployment Rate', 'M2', 'Housing Starts (MoM)', 'Building Permits (MoM)', '10-Year Note Auction',
'EIA Crude Oil Stocks change', 'S&P/Case-Shiller Home Price Indices (YoY)', 'Markit Services PMI', 'Markit PMI Composite', 'Consumer Confidence', 'Dallas Fed Manufacturing Business Index', 'ISM Prices Paid', 'ISM Manufacturing PMI', 'Markit Manufacturing PMI', 'Construction Spending (MoM)', 'Trade Balance', 'ISM Non-Manufacturing PMI', 'Factory Orders (MoM)']
,
'Japan':
['National Consumer Price Index (YoY)', 'Foreign investment in Japan stocks', 'Foreign bond investment', 'Unemployment Rate', 'Industrial Production (MoM)', 'Industrial Production (YoY)' ]
,
'European Monetary Union':
['Unemployment Rate', 'Consumer Price Index (YoY)', 'Consumer Price Index - Core (YoY)', 'Markit Manufacturing PMI', 'Producer Price Index (MoM)', 'Producer Price Index (YoY)', 'Markit Services PMI']
,
'Germany':
['Markit Manufacturing PMI', '10-y Bond Auction']
,
'Australia':
['TD Securities Inflation (YoY)', 'TD Securities Inflation (MoM)', 'RBA Interest Rate Decision', 'Retail Sales s.a (MoM)']
,
'Canada':
['RBC Manufacturing PMI']
,
'New Zealand':
[]
,
'China':
[]
,
'United Kingdom':
['Markit Manufacturing PMI', 'PMI Construction']
,
'Italy':
[]
,
'Switzerland':
['Real Retail Sales (YoY)']
,
'France':
[]
}
return economic_data_dict
def get_fed_list():
fed_list = ['Federal_Funds_Futures', 'Effective_Funds_Rate']
return fed_list
def get_currency_data(currency_list, currency_quandl_list, num_days_regression, end_date , api_key):
# Calculate dates to begin and end
start_date = end_date - timedelta(num_days_regression)
# Initialize data table
data_table = None
# Run through currencies, first assignment is initialized
# Anything past first currency is joined into table
for currency in currency_quandl_list:
current_column = qdl.get(currency, start_date= start_date, end_date = end_date , authtoken= api_key)
current_column.columns = [currency]
if data_table is None:
data_table = current_column
else:
data_table = data_table.join(current_column, how= 'left', rsuffix= '')
data_table.columns = currency_list
if 'USD/MXN' in currency_list:
data_table['USD/MXN'] = 1 / data_table['USD/MXN']
return data_table
def get_fed_data(fed_reserve_list, num_days_regression, end_date , api_key):
# Calculate dates
start_date = end_date - timedelta(num_days_regression)
# Get Federal Funds Futures data and Effective Funds Futures
fed_fund_futures = qdl.get('CHRIS/CME_FF1.6', start_date= start_date, end_date = end_date , authtoken= api_key)
effective_fund_futures = qdl.get('FRED/DFF', start_date = start_date, end_date = end_date , authtoken= api_key)
fed_table = fed_fund_futures.join(effective_fund_futures, how= 'left', rsuffix= '')
fed_table.columns = fed_reserve_list
fed_table.fillna(method = 'ffill', inplace = True )
# Calculate the probability of a rate hike as the difference between rolling federal funds futures and the effective funds rate divided by the amount of the hike.
# (Multiplied by 100 for percentage)
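# Illustrative arithmetic (made-up numbers): a futures price of 99.50 implies a 0.50% rate;
# with an effective rate of 0.38%, the implied probability is (0.50 - 0.38) / 0.25 * 100 = 48%.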
fed_table['rate_hike_prob_25_basis'] = (((100 - fed_table['Federal_Funds_Futures']) - fed_table['Effective_Funds_Rate'])/ 0.25) * 100
fed_table = fed_table.drop('Federal_Funds_Futures', axis= 1)
fed_table = fed_table.drop('Effective_Funds_Rate', axis= 1)
return fed_table
def get_benchmark(benchmark_list, benchmark_quandl_list, num_days_regression, end_date , api_key, shift):
# Get returns of a benchmark asset
start_date = end_date - timedelta(num_days_regression)
benchmark_table = qdl.get(benchmark_quandl_list, start_date = start_date, end_date = end_date , authtoken= api_key)
benchmark_table.columns = ['Benchmark']
benchmark_returns = benchmark_table.pct_change(periods= shift).dropna() * 100
benchmark_returns.drop(benchmark_returns.index[:1], inplace=True)
return benchmark_returns
def merge_with_technicals(currency_list, returns_table, fundamentals_table, RSI, MACD, Stochastics, beg_date, stoch_date):
# Create an empty list that will hold one DataFrame per currency
dataframe_list = []
for currency in currency_list:
buildup_dataframe = DataFrame(returns_table[currency])
buildup_dataframe = buildup_dataframe.join(fundamentals_table, how= 'left', rsuffix= '')
buildup_dataframe = buildup_dataframe.join(RSI[currency], how= 'left', rsuffix= '_RSI')
buildup_dataframe = buildup_dataframe.join(MACD[currency], how='left', rsuffix='_MACD')
if beg_date > stoch_date:
buildup_dataframe = buildup_dataframe.join(Stochastics[currency], how='left', rsuffix='_Stoch')
dataframe_list.append(buildup_dataframe)
return dataframe_list
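# Illustrative usage sketch: a minimal, hedged example of how the helper functions above
# could be chained together. It assumes a valid Quandl API key ('YOUR_QUANDL_KEY' is a
# placeholder), network access, and an arbitrary 90-day lookback window.
def example_currency_pipeline(api_key='YOUR_QUANDL_KEY', num_days=90):
    from datetime import datetime  # may duplicate the module's own imports
    end_date = datetime.now()
    currency_list = get_currency_list()
    currency_quandl_list = get_currency_quandl_list()
    # Spot rates for every pair over the lookback window, one column per pair.
    data_table = get_currency_data(currency_list, currency_quandl_list, num_days, end_date, api_key)
    # Daily percentage returns, mirroring how get_benchmark() computes returns.
    returns_table = data_table.pct_change(periods=1).dropna() * 100
    # Implied probability of a 25 bp rate hike derived from fed funds futures.
    fed_table = get_fed_data(get_fed_list(), num_days, end_date, api_key)
    return data_table, returns_table, fed_table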
if __name__ == "__main__":
main() | mit |
yonglehou/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 232 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
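As a further illustration (a sketch; the names `v2` and `D2` are arbitrary),
string values are expanded into one-hot features:
>>> v2 = DictVectorizer(sparse=False)
>>> D2 = [{'flavor': 'ham'}, {'flavor': 'spam'}]
>>> X2 = v2.fit_transform(D2) # each row gets a 1 in its value's column
>>> v2.get_feature_names()
['flavor=ham', 'flavor=spam']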
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 228 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
yonglehou/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 308 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break down when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified grid
(here we use the default). Thus it is more efficient if the number of
grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples for a large number of them to be selected.
In terms of numerical errors, Lars will accumulate more errors for
heavily correlated variables, while the coordinate descent algorithm
only samples the path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/python/training/distribute.py | 4 | 49880 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class DistributionStrategy, TowerContext, and supporting APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import device_util
from tensorflow.python.util import nest
# ------------------------------------------------------------------------------
# Internal API for setting the current thread mode as being either in a
# tower or cross-tower context for a particular distribution strategy.
class _ThreadMode(object):
def __init__(self, dist, cross, tower):
self.distribution_strategy = dist
self.cross_tower_context = cross
self.tower_context = tower
class _CrossTowerThreadMode(_ThreadMode):
def __init__(self, distribution_strategy):
_ThreadMode.__init__(
self, distribution_strategy, distribution_strategy, None)
class _InTowerThreadMode(_ThreadMode):
def __init__(self, tower_ctx):
_ThreadMode.__init__(
self, tower_ctx.distribution_strategy, None, tower_ctx)
_per_thread_mode = threading.local()
def _push_per_thread_mode(context):
if not hasattr(_per_thread_mode, "stack"):
_per_thread_mode.stack = []
_per_thread_mode.stack.append(context)
def _pop_per_thread_mode():
_per_thread_mode.stack.pop(-1)
class _DefaultTowerThreadMode(_ThreadMode):
"""Type of default value returned by `_get_per_thread_mode()`.
Used when the thread-local stack is empty.
"""
def __init__(self):
# _default_distribution_strategy and _default_tower_context are
# defined at the bottom of this file.
_ThreadMode.__init__(
self, _default_distribution_strategy, None, _default_tower_context)
def _get_per_thread_mode():
try:
return _per_thread_mode.stack[-1]
except (AttributeError, IndexError):
# _default_tower_mode is defined at the bottom of this file.
return _default_tower_mode
# ------------------------------------------------------------------------------
# Context tracking whether in a distribution.update() or .update_non_slot()
# call.
_update_device = threading.local()
def get_update_device():
"""Get the current device if in a `DistributionStrategy.update()` call."""
try:
return _update_device.current
except AttributeError:
return None
class UpdateContext(object):
"""Context manager when you are in `update()` or `update_non_slot()`."""
def __init__(self, device):
self._device = device
self._old_device = None
def __enter__(self):
self._old_device = get_update_device()
_update_device.current = self._device
def __exit__(self, exception_type, exception_value, traceback):
del exception_type, exception_value, traceback
_update_device.current = self._old_device
# ------------------------------------------------------------------------------
# Public API for accessing the current thread mode
def get_tower_context():
"""Returns the current TowerContext or None if in a cross-tower context.
Note that execution:
1. starts in the default (single-tower) tower context (this function
will return the default TowerContext object);
2. switches to cross-tower context (in which case this will return
None) when entering a `with DistributionStrategy.scope():` block;
3. switches to a (non-default) tower context inside
`call_for_each_tower(fn, ...)`;
4. if `fn` calls `get_tower_context()->merge_call(merge_fn, ...)`, then
inside `merge_fn` you are back in the cross-tower context (and again
this function will return None).
Note that you can also go directly from step 1 to 4 to switch to a
cross-tower context for the default `DistributionStrategy`. You may
also switch from the cross-tower context of 4 to a tower context by
calling `call_for_each_tower()`, jumping back to step 3.
Most `DistributionStrategy` methods may only be executed in
a cross-tower context, in a tower context you should use the
`TowerContext` API instead.
Returns:
The current `TowerContext` object when in a tower context scope, else None.
Exactly one of `get_tower_context()` and `get_cross_tower_context()`
will return None in a particular block.
"""
return _get_per_thread_mode().tower_context
def get_cross_tower_context():
"""Returns the current DistributionStrategy if in a cross-tower context.
Note that execution:
1. starts in the default (single-tower) tower context;
2. switches to cross-tower context when entering a
`with DistributionStrategy.scope():` block;
3. switches to a (non-default) tower context inside
`call_for_each_tower(fn, ...)`;
4. if `fn` calls `get_tower_context()->merge_call(merge_fn, ...)`, then
inside `merge_fn` you are back in the cross-tower context.
Note that you can also go directly from step 1 to 4 to switch to a
cross-tower context for the default `DistributionStrategy`. You may
also switch from the cross-tower context of 4 to a tower context by
calling `call_for_each_tower()`, jumping back to step 3.
Most `DistributionStrategy` methods may only be executed in
a cross-tower context.
Returns:
Returns the current `DistributionStrategy` object in a cross-tower
context, or None.
Exactly one of `get_tower_context()` and `get_cross_tower_context()`
will return None in a particular block.
"""
return _get_per_thread_mode().cross_tower_context
def get_distribution_strategy():
"""Returns the current `DistributionStrategy` object.
Prefer to use `get_tower_context()` or `get_cross_tower_context()`
instead when possible.
Returns:
A `DistributionStrategy` object. Inside a
`with distribution_strategy.scope()` block, it returns
`distribution_strategy`, otherwise it returns the default
(single-tower) `DistributionStrategy` object.
"""
return _get_per_thread_mode().distribution_strategy
def has_distribution_strategy():
"""Return if there is a current non-default `DistributionStrategy`.
Returns:
True if inside a `with distribution_strategy.scope():`.
"""
return get_distribution_strategy() is not _default_distribution_strategy
# ------------------------------------------------------------------------------
# Public utility functions.
def get_loss_reduction():
"""Reduce `method_string` corresponding to the last loss reduction."""
loss_reduction = ops.get_default_graph()._last_loss_reduction # pylint: disable=protected-access
if loss_reduction == losses_impl.Reduction.SUM:
return "sum"
return "mean"
# ------------------------------------------------------------------------------
# Internal API for validating the current thread mode
def _require_cross_tower_context(distribution_strategy):
"""Verify in cross-tower context for `distribution_strategy`."""
context = _get_per_thread_mode()
if context.cross_tower_context is distribution_strategy: return
# We have an error to report, figure out the right message.
if context.distribution_strategy is not distribution_strategy:
if context.distribution_strategy is _default_distribution_strategy:
raise RuntimeError(
'Need to be inside "with distribution_strategy.scope()" for %s' %
(distribution_strategy,))
else:
raise RuntimeError(
"Mixing different DistributionStrategy objects: %s is not %s" %
(context.distribution_strategy, distribution_strategy))
assert context.cross_tower_context is None
raise RuntimeError("Method requires being in cross-tower context, use "
"get_tower_context().merge_call()")
def require_tower_context(tower_ctx):
"""Verify in `tower_ctx` tower context."""
context = _get_per_thread_mode()
if context.tower_context is tower_ctx: return
# We have an error to report, figure out the right message.
if context.tower_context is None:
raise RuntimeError("Need to be inside `call_for_each_tower()`")
if context.distribution_strategy is tower_ctx.distribution_strategy:
# Two different TowerContexts with the same DistributionStrategy.
raise RuntimeError("Mismatching tower context.")
raise RuntimeError(
"Mismatching DistributionStrategy objects: %s is not %s." %
(context.distribution_strategy, tower_ctx.distribution_strategy))
def _require_distribution_strategy_scope(distribution_strategy):
"""Verify in a `distribution_strategy.scope()` in this thread."""
context = _get_per_thread_mode()
if context.distribution_strategy is distribution_strategy: return
# We have an error to report, figure out the right message.
if context.distribution_strategy is _default_distribution_strategy:
raise RuntimeError(
'Need to be inside "with distribution_strategy.scope()" for %s' %
(distribution_strategy,))
else:
raise RuntimeError(
"Mixing different DistributionStrategy objects: %s is not %s" %
(context.distribution_strategy, distribution_strategy))
# ------------------------------------------------------------------------------
# Internal context managers used to implement the DistributionStrategy
# base class
class _CurrentDistributionContext(object):
"""Context manager for setting the `DistributionStrategy` and var creator."""
def __init__(self,
distribution_strategy,
var_creator_scope,
var_scope=None,
default_device=None):
self._context = _CrossTowerThreadMode(distribution_strategy)
self._var_creator_scope = var_creator_scope
self._var_scope = var_scope
if default_device:
self._device_scope = ops.device(default_device)
else:
self._device_scope = None
def __enter__(self):
_push_per_thread_mode(self._context)
if self._var_scope:
self._var_scope.__enter__()
self._var_creator_scope.__enter__()
if self._device_scope:
self._device_scope.__enter__()
return self._context.distribution_strategy
def __exit__(self, exception_type, exception_value, traceback):
if self._device_scope:
self._device_scope.__exit__(exception_type, exception_value, traceback)
self._var_creator_scope.__exit__(exception_type, exception_value, traceback)
if self._var_scope:
self._var_scope.__exit__(exception_type, exception_value, traceback)
_pop_per_thread_mode()
class _SameScopeAgainContext(object):
"""Trivial context manager when you are already in `scope()`."""
def __init__(self, distribution_strategy):
self._distribution_strategy = distribution_strategy
def __enter__(self):
return self._distribution_strategy
def __exit__(self, exception_type, exception_value, traceback):
del exception_type, exception_value, traceback
# ------------------------------------------------------------------------------
# Base classes for all distribution strategies.
class DistributionStrategy(object):
"""A list of devices with a state & compute distribution policy.
The intent is that you can write an algorithm in a stylized way and
it will be usable with a variety of different `DistributionStrategy`
implementations. Each descendant will implement a different strategy
for distributing the algorithm across multiple devices/machines.
Furthermore, these changes can be hidden inside the specific layers
and other library classes that need special treatment to run in a
distributed setting, so that most users' model definition code can
run unchanged. The `DistributionStrategy` API works the same way
with eager and graph execution.
First let's introduce a few high-level concepts:
* _Data parallelism_ is where we run multiple copies of the model
on different slices of the input data. This is in contrast to
_model parallelism_ where we divide up a single copy of a model
across multiple devices.
Note: we only support data parallelism for now, but
hope to add support for model parallelism in the future.
* A _tower_ is one copy of the model, running on one slice of the
input data.
* _Synchronous_, or more commonly _sync_, training is where the
updates from each tower are aggregated together before updating
the model variables. This is in contrast to _asynchronous_, or
_async_ training, where each tower updates the model variables
independently.
* Furthermore you might run your computation on multiple devices
on one machine (or "host"), or on multiple machines/hosts.
If you are running on multiple machines, you might have a
single master host that drives computation across all of them,
or you might have multiple clients driving the computation
asynchronously.
To distribute an algorithm, we might use some of these ingredients:
* Parameter servers: These are hosts that hold a single copy of
parameters/variables. All towers that want to operate on a variable
retrieve it at the beginning of a step and send an update to be
applied at the end of the step. Can support either sync or async
training.
* Mirrored variables: These are variables that are copied to multiple
devices, where we keep the copies in sync by applying the same
updates to every copy. Normally would only be used with sync training.
* Reductions and Allreduce: A _reduction_ is some method of
aggregating multiple values into one value, like "sum" or
"mean". If doing sync training, we will perform a reduction on the
gradients to a parameter from all towers before applying the
update. Allreduce is an algorithm for performing a reduction on
values from multiple devices and making the result available on
all of those devices.
* In the future we will have support for TensorFlow's partitioned
variables, where a single variable is split across multiple
devices.
We have then a few approaches we want to support:
* Code written (as if) with no knowledge of class `DistributionStrategy`.
This code should work as before, even if some of the layers, etc.
used by that code are written to be distribution-aware. This is done
by having a default `DistributionStrategy` that gives ordinary behavior,
and by default being in a single tower context.
* Ordinary model code that you want to run using a specific
`DistributionStrategy`. This can be as simple as:
```
with my_distribution.scope():
iterator = my_distribution.distribute_dataset(
dataset).make_one_shot_iterator()
tower_train_ops = my_distribution.call_for_each_tower(
tower_fn, iterator.get_next())
train_op = tf.group(my_distribution.unwrap(tower_train_ops))
```
This takes an ordinary `dataset` and `tower_fn` and runs it
distributed using a particular `DistributionStrategy` in
`my_distribution`. Any variables created in `tower_fn` are created
using `my_distribution`'s policy, and library functions called by
`tower_fn` can use the `get_tower_context()` API to get enhanced
behavior in this case.
You can also create an initializable iterator instead of a one-shot
iterator. In that case, you will need to ensure that you initialize the
iterator before calling get_next.
```
iterator = my_distribution.distribute_dataset(
dataset).make_initializable_iterator()
session.run(iterator.initializer)
```
* If you want to write a distributed algorithm, you may use any of
the `DistributionStrategy` APIs inside a
`with my_distribution.scope():` block of code.
Lower-level concepts:
* Wrapped values: In order to represent values parallel across devices
(either towers or the devices associated with a particular value), we
wrap them in a "PerDevice" or "Mirrored" object that contains a map
from device to values. "PerDevice" is used when the value may be
different across devices, and "Mirrored" when the values are the same.
* Unwrapping and merging: Consider calling a function `fn` on
multiple devices, like `call_for_each_tower(fn, w)` with an
argument `w` that is a wrapped value. This means `w` will have a
map taking tower device `d0` to `w0`, tower device `d1` to `w1`,
etc. `call_for_each_tower()` unwraps `w` before calling `fn`, so
it calls `fn(w0)` on `d0`, `fn(w1)` on `d1`, etc. It then merges
the return values from `fn()`, which can possibly result in
wrapped values. For example, let's say `fn()` returns a tuple with
three components: `(x, a, v0)` from tower 0, `(x, b, v1)` on tower 1,
etc. If the first component is the same object `x` from every
tower, then the first component of the merged result will also be
`x`. If the second component is different (`a`, `b`, ...) from
each tower, then the merged value will have a wrapped map from
tower device to the different values. If the third component is
the members of a mirrored variable (`v` maps `d0` to `v0`, `d1` to
`v1`, etc.), then the merged result will be that mirrored variable
(`v`).
* Tower context vs. Cross-tower context: _tower context_ is when we
are in some function that is being called once for each tower.
Otherwise we are in cross-tower context, which is useful for
calling `DistributionStrategy` methods which operate across the
towers (like `reduce()`). By default you start in a tower context
(the default "single tower context") and then some methods can
switch you back and forth, as described below.
* Worker devices vs. parameter devices: Most tower computations will
happen on worker devices. Since we don't yet support model
parallelism, there will be one worker device per tower. When using
parameter servers (see above), the set of devices holding
variables may be different, otherwise the parameter devices might
match the worker devices.
* Non-slot devices are some subset of the parameter devices where we
put all the non-slot variables. We need to ensure that all
non-slot variables are allocated on the same device, or mirrored
across the same set of devices. If you have some variable you want
to colocate all the non-slot variables with, you can use
`colocate_vars_with()` to get the remaining non-slot variables on
the same device. Otherwise you can use `non_slot_devices()` to
pick a consistent set of devices to pass to both
`colocate_vars_with()` and `update_non_slot()`.
When using a `DistributionStrategy`, we have a new type dimension
called _locality_ that says what values are compatible with which
APIs:
* T: different value for each tower (e.g. a PerDevice-wrapped value).
* M: value is "mirrored" across towers, i.e. there are copies with the
same value on each tower (e.g. a Mirrored-wrapped value).
* V(`v`): value is "mirrored" across all the devices which have a
copy of variable `v` (also a Mirrored-wrapped value, but over
parameter devices instead of worker devices).
* N: value is "mirrored" across all the "non-slot" devices
Rules for methods with respect to locality and single-tower vs.
cross-tower context:
* `with d.scope()`: default single-tower context -> cross-tower context for
`d`
* `with d.colocate_vars_with(v)`: in tower/cross-tower context, variables
will be created with locality V(`v`). That is, if we write
`with d.colocate_vars_with(v1): v2 = tf.get_variable(...)`, then
`v2` will have locality V(`v1`), i.e. locality V(`v2`) will equal
V(`v1`).
* `with d.colocate_vars_with(d.non_slot_devices(...))`: in
tower/cross-tower context, variables will be created with locality N
* `v = tf.get_variable(...)`: in tower/cross-tower context, creates
a variable (which by definition will have locality V(`v`), though
will match another locality if inside a `colocate_vars_with`
scope).
* `d.distribute_dataset(dataset).make_one_shot_iterator()`: in cross-tower
context, produces an iterator with locality T
* `d.broadcast(t)`: in cross-tower context, produces a value with locality M
* `d.broadcast(t, v)`: in cross-tower context, produces a value with
locality V(`v`)
* `d.call_for_each_tower(fn, ...)`: in cross-tower context, runs
`fn()` in a tower context (and so may call `get_tower_context()` and
use its API, including `merge_call()` to get back to cross-tower
context), once for each tower. May use values with locality T or
M, and any variable.
* `d.reduce(m, t)`: in cross-tower context, accepts t with locality T
and produces a value with locality M.
* `d.reduce(m, t, v)`: in cross-tower context, accepts t with
locality T and produces a value with locality V(`v`).
* `d.batch_reduce(m, [(t, v)])`: see `d.reduce()`
* `d.update(v, fn, ...)`: in cross-tower context, runs `fn()` once
for each device `v` is copied to, all inputs should have locality
V(`v`), output will have locality V(`v`) as well.
* `d.update_non_slot(d.non_slot_devices(), fn)`: in cross-tower
context, like `d.update()` except with locality N.
* `d.read_var(v)`: Gets the (read-only) value of the variable `v` (on
the device determined by the current device scope), aggregating
across towers for tower-local variables. Frequently, this will be
done automatically when using `v` in an expression or fetching it in
a cross-tower context, but this function can be used to force that
conversion happens at a particular point in time (for example, to
add the result of the conversion to a graph collection).
The standard pattern for updating variables is to:
1. Wrap your input dataset in `d.distribute_dataset()` and create an iterator.
2. Define each tower `d.call_for_each_tower()` up to the point of
getting a list of gradient, variable pairs.
3. Call `d.reduce("sum", t, v)` or `d.batch_reduce()` to sum the
gradients (with locality T) into values with locality V(`v`).
4. Call `d.update(v)` for each variable to update its value.
Steps 3 and 4 are done automatically by class `Optimizer` if you call
its `apply_gradients` method in a tower context. Otherwise you can
manually call its `_distributed_apply` method in a cross-tower context.
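A sketch of steps 3 and 4 (illustrative only: `g` is a per-tower gradient
for variable `v`, and the `assign_sub` stands in for whatever update an
optimizer would actually apply):
```
# In cross-tower context, with (g, v) pairs obtained via call_for_each_tower():
reduced_g = d.reduce("sum", g, destinations=v)  # locality T -> V(v)
def update_fn(var, grad):
  return var.assign_sub(0.1 * grad)             # e.g. a plain SGD step
update_op = d.update(v, update_fn, reduced_g)   # runs on each device holding v
```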
Another thing you might want to do in the middle of your tower function
is an all-reduce of some intermediate value, using `d.reduce()` or
`d.batch_reduce()` without supplying a variable as the destination.
Layers should expect to be called in a tower context, and can use
the `get_tower_context()` function to get a `TowerContext` object. The
`TowerContext` object has a `merge_call()` method for entering
cross-tower context where you can use `reduce()` (or
`batch_reduce()`) and then optionally `update()` to update state.
You may use this API whether or not a `DistributionStrategy` is
being used, since there is a default implementation of
`TowerContext` and `DistributionStrategy`. Or you can use the
`get_tower_context().is_single_tower` property to run different code
in the distributed vs. single tower cases.
"""
# TODO(josh11b): Raise an exception if variable partitioning requested before
# we add support.
# TODO(josh11b): Also `parameter_device_index` property?
# TODO(josh11b): `map()`
# TODO(josh11b): ClusterSpec/ClusterResolver
# TODO(josh11b): Partitioned computations, state; sharding
# TODO(josh11b): Model parallelism: "towers" with multiple devices; shuffling
# TODO(josh11b): List of towers with their worker and parameter devices
# (where the parameter devices may overlap in the ps case).
def __init__(self):
self._default_device = None
def scope(self):
"""Returns a context manager selecting this DistributionStrategy as current.
Inside a `with distribution_strategy.scope():` code block, this thread
will use a variable creator set by `distribution_strategy`, and will
enter its "cross-tower context".
Returns:
A context manager.
"""
if has_distribution_strategy():
_require_cross_tower_context(self)
return _SameScopeAgainContext(self)
def creator_with_resource_vars(*args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs["use_resource"] = True
return self._create_variable(*args, **kwargs)
def disable_partitioned_variables(getter, *args, **kwargs):
if kwargs.pop("partitioner", None) is not None:
tf_logging.log_first_n(
tf_logging.WARN, "Partitioned variables are disabled when using "
"DistributionStrategy.", 1)
return getter(*args, **kwargs)
return _CurrentDistributionContext(
self, variable_scope.variable_creator_scope(creator_with_resource_vars),
variable_scope.variable_scope(
variable_scope.get_variable_scope(),
custom_getter=disable_partitioned_variables),
self._default_device)
def _create_variable(self, next_creator, *args, **kwargs):
# Note: should support "colocate_with" argument.
raise NotImplementedError("must be implemented in descendants")
def tower_local_var_scope(self, reduce_method):
"""Inside this scope, new variables will not be mirrored.
There will still be one component variable per tower, but there is
no requirement that they stay in sync. Instead, when saving them
or calling `read_var()`, we use the value that results when
calling `reduce()` on all the towers' variables.
Note: tower-local implies not trainable. Instead, it is expected
that each tower will directly update (using `assign_add()` or
whatever) its local variable instance but only the aggregated
value (accessible using `read_var()`) will be exported from the
model. When it is acceptable to only aggregate on export, we
greatly reduce communication overhead by using tower-local
variables.
Note: All component variables will be initialized to the same
value, using the initialization expression from the first tower.
The values will match even if the initialization expression uses
random numbers.
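Example usage (a sketch; `distribution_strategy` stands for this object and
the variable name is arbitrary):
```
with distribution_strategy.scope():
  with distribution_strategy.tower_local_var_scope("sum"):
    # One non-trainable component per tower; reading or saving the
    # variable aggregates the components with a "sum" reduction.
    num_examples = tf.get_variable("num_examples", initializer=0.)
```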
Args:
reduce_method: String used as a `method_string` to `reduce()`
to get the value to save when checkpointing.
Returns:
A context manager.
"""
def create_tower_local_variable(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs["use_resource"] = True
kwargs["tower_local_reduce_method"] = reduce_method
return next_creator(*args, **kwargs)
_require_distribution_strategy_scope(self)
return variable_scope.variable_creator_scope(create_tower_local_variable)
def read_var(self, v):
"""Reads the value of a variable.
Returns the aggregate value of a tower-local variable, or the
(read-only) value of any other variable.
Args:
v: A variable allocated within the scope of this `DistributionStrategy`.
Returns:
A tensor representing the value of `v`, aggregated across towers if
necessary.
"""
raise NotImplementedError("must be implemented in descendants")
def colocate_vars_with(self, colocate_with_variable):
"""Scope that controls which devices variables will be created on.
No operations should be added to the graph inside this scope, it
should only be used when creating variables (some implementations
work by changing variable creation, others work by using a
tf.colocate_with() scope).
This may only be used inside `self.scope()`.
Example usage:
```
with distribution_strategy.scope():
var1 = tf.get_variable(...)
with distribution_strategy.colocate_vars_with(v1):
# var2 and var3 will be created on the same device(s) as var1
var2 = tf.get_variable(...)
var3 = tf.get_variable(...)
def fn(v1, v2, v3):
# operates on v1 from var1, v2 from var2, and v3 from var3
# `fn` runs on every device `v1` is on, `v2` and `v3` will be there too.
distribution_strategy.update(v1, fn, v2, v3)
```
Args:
colocate_with_variable: A variable created in `self.scope()`. Variables created
while in the returned context manager will be on the same set of
devices as `colocate_with_variable`.
Returns:
A context manager.
"""
def create_colocated_variable(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs["use_resource"] = True
kwargs["colocate_with"] = colocate_with_variable
return next_creator(*args, **kwargs)
_require_distribution_strategy_scope(self)
return variable_scope.variable_creator_scope(create_colocated_variable)
def _call_dataset_fn(self, dataset_fn):
result = dataset_fn()
if not isinstance(result, dataset_ops.Dataset):
raise ValueError(
"dataset_fn() must return a tf.data.Dataset when using a "
"DistributionStrategy.")
return result
# TODO(josh11b): `PerDeviceDataset` currently only implements a few methods of
# Dataset API such as make_one_shot_iterator and make_initializable_iterator.
# Extend to implement more functionality of datasets.
def distribute_dataset(self, dataset_fn):
"""Return a `dataset` split across all towers.
Suitable for providing input to for `call_for_each_tower()` by creating an
iterator:
```
def dataset_fn():
return tf.data.Dataset.from_tensors([[1.]]).repeat()
with distribution_strategy.scope():
distributed_dataset = distribution_strategy.distribute_dataset(dataset_fn)
iterator = distributed_dataset.make_one_shot_iterator()
tower_results = distribution_strategy.call_for_each_tower(
tower_fn, iterator.get_next())
```
Args:
dataset_fn: A function that returns a `tf.data.Dataset`.
Returns:
A `PerDeviceDataset` that will produce data for each tower.
"""
raise NotImplementedError("must be implemented in descendants")
def broadcast(self, tensor, destinations=None):
"""Mirror a tensor on one device to all worker devices.
Args:
tensor: A Tensor value to broadcast.
destinations: An optional mirrored variable, device string, or
list of device strings, specifying the destination devices
to copy `tensor` to. Defaults to `self.worker_devices`.
Returns:
A value mirrored to `destinations` devices.
"""
# TODO(josh11b): More docstring
_require_cross_tower_context(self)
return self._broadcast(tensor, destinations)
def _broadcast(self, tensor, destinations):
raise NotImplementedError("must be implemented in descendants")
def call_for_each_tower(self, fn, *args, **kwargs):
"""Run `fn` once per tower.
`fn` may call `tf.get_tower_context()` to access methods such as
`tower_id()` and `merge_call()`.
`merge_call()` is used to communicate between the towers and
re-enter the cross-tower context. All towers pause their execution
having encountered a `merge_call()` call. After that the
`merge_fn`-function is executed. Its results are then unwrapped and
given back to each tower call. After that execution resumes until
`fn` is complete or encounters another `merge_call()`. Example:
```python
# Called once in "cross-tower" context.
def merge_fn(distribution, three_plus_tower_id):
# sum the values across towers
return sum(distribution.unwrap(three_plus_tower_id))
# Called once per tower in `distribution`, in a "tower" context.
def fn(three):
tower_ctx = tf.get_tower_context()
v = three + tower_ctx.tower_id
# Computes the sum of the `v` values across all towers.
s = tower_ctx.merge_call(merge_fn, v)
return s + v
with distribution.scope():
# in "cross-tower" context
...
merged_results = distribution.call_for_each_tower(fn, 3)
# merged_results has the values from every tower execution of `fn`.
print(distribution.unwrap(merged_results)) # Prints a list
```
Args:
fn: function to run (will be run once per tower).
*args: positional arguments for `fn`
**kwargs: keyword arguments for `fn`.
`"run_concurrently"`: Boolean indicating whether executions of `fn`
can be run concurrently (under eager execution only), defaults to
`True`.
Returns:
Merged return value of `fn` across all towers.
"""
_require_cross_tower_context(self)
return self._call_for_each_tower(fn, *args, **kwargs)
def _call_for_each_tower(self, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, method_string, value, destinations=None):
"""Combine (via e.g. sum or mean) values across towers.
Args:
method_string: A string indicating how to combine values, either
"sum" or "mean".
value: A per-device value with one value per tower.
destinations: An optional mirrored variable, a device string,
list of device strings. The return value will be copied to all
destination devices (or all the devices where the mirrored
variable resides). If `None` or unspecified, the destinations
will match the devices `value` resides on.
Returns:
A value mirrored to `destinations`.
"""
# TODO(josh11b): More docstring
# TODO(josh11b): Return an unwrapped value if colocate_with is a
# single device.
_require_cross_tower_context(self)
assert method_string in ("sum", "mean")
return self._reduce(method_string, value, destinations)
def _reduce(self, method_string, value, destinations):
raise NotImplementedError("must be implemented in descendants")
def batch_reduce(self, method_string, value_destination_pairs):
"""Combine multiple `reduce` calls into one for faster execution.
Args:
method_string: A string indicating how to combine values, either
"sum" or "mean".
value_destination_pairs: A sequence of (value, destinations)
pairs. See `reduce()` for a description.
Returns:
A list of mirrored values, one per pair in `value_destination_pairs`.
"""
# TODO(josh11b): More docstring
_require_cross_tower_context(self)
assert method_string in ("sum", "mean")
return self._batch_reduce(method_string, value_destination_pairs)
def _batch_reduce(self, method_string, value_destination_pairs):
return [self.reduce(method_string, t, destinations=v)
for t, v in value_destination_pairs]
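  # Illustrative sketch (not from the original docstrings): the default
  # `_batch_reduce` above simply issues one `reduce()` per pair, so
  #
  #   distribution.batch_reduce("sum", [(v1, d1), (v2, d2)])
  #
  # behaves like
  #
  #   [distribution.reduce("sum", v1, destinations=d1),
  #    distribution.reduce("sum", v2, destinations=d2)]
  #
  # Concrete strategies may override `_batch_reduce` to fuse these into fewer
  # cross-device operations.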
def update(self, var, fn, *args, **kwargs):
"""Run `fn` to update `var` using inputs mirrored to the same devices.
If `var` is mirrored across multiple devices, then this implements
logic like:
```
results = {}
for device, v in var:
with tf.device(device):
# *args and **kwargs will be unwrapped if they are mirrored.
results[device] = fn(v, *args, **kwargs)
return merged(results)
```
    Otherwise this returns `fn(var, *args, **kwargs)` colocated with `var`.
Neither *args nor **kwargs may contain per-device values.
If they contain mirrored values, they will be unwrapped before
calling `fn`.
Args:
var: Variable, possibly mirrored to multiple devices, to operate on.
fn: Function to call. Should take the variable as the first argument.
*args: Additional positional arguments to pass to `fn()`.
**kwargs: Keyword arguments to pass to `fn()`.
Returns:
Merged return value of `fn` across all towers.
"""
_require_cross_tower_context(self)
return self._update(var, fn, *args, **kwargs)
def _update(self, var, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
def update_non_slot(self, colocate_with, fn, *args, **kwargs):
"""Runs `fn(*args, **kwargs)` on `colocate_with` devices.
Args:
colocate_with: The return value of `non_slot_devices()`.
fn: Function to execute.
*args: Positional arguments to pass to `fn()`.
**kwargs: Keyword arguments to pass to `fn()`.
Returns:
Return value of `fn`, possibly merged across devices.
"""
_require_cross_tower_context(self)
return self._update_non_slot(colocate_with, fn, *args, **kwargs)
def _update_non_slot(self, colocate_with, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
def unwrap(self, value):
"""Returns the list of all per-device values contained in `value`.
Args:
value: A value returned by `call_for_each_tower()` or a variable
created in `scope()`.
Returns:
A list of values contained in `value`. If `value` represents a single
      value, this returns `[value]`.
"""
_require_cross_tower_context(self)
return self._unwrap(value)
def _unwrap(self, distributed_value):
raise NotImplementedError("must be implemented in descendants")
def group(self, value, name=None):
"""Shortcut for `tf.group(distribution.unwrap(value))`."""
value = nest.flatten(self.unwrap(value))
if len(value) != 1 or name is not None:
return control_flow_ops.group(value, name=name)
# Special handling for the common case of one op.
v, = value
if hasattr(v, "op"):
v = v.op
return v
@property
def is_single_tower(self):
"""Returns whether there is a single tower or multiple.
Returns:
A boolean. If `True`, `call_for_each_tower(fn)` will only call `fn` once.
If `False`, `call_for_each_tower(fn)` may call `fn` multiple times.
"""
raise NotImplementedError("must be implemented in descendants")
@property
def num_towers(self):
"""Returns number of towers, for purposes of averaging across towers."""
raise NotImplementedError("must be implemented in descendants")
@property
def worker_devices(self):
"""Returns the list of devices used to run `call_for_each_tower()` calls."""
# TODO(josh11b): More docstring
raise NotImplementedError("must be implemented in descendants")
@property
def parameter_devices(self):
"""Returns the list of devices used for variable and `update` placement."""
# TODO(josh11b): More docstring
raise NotImplementedError("must be implemented in descendants")
def non_slot_devices(self, var_list):
"""Device(s) for non-slot variables.
Create variables on these devices in a
`with colocate_vars_with(non_slot_devices(...)):` block.
Update those using `update_non_slot()`.
Args:
var_list: The list of variables being optimized, needed with the
default `DistributionStrategy`.
"""
raise NotImplementedError("must be implemented in descendants")
@property
def worker_device_index(self):
"""An object mapping worker device to an id.
This might be passed as an argument to `call_for_each_tower()`, as in:
```
with distribution_strategy.scope():
def fn(device_id):
# device_id is an integer. `fn` is being executed on device:
# distribution_strategy.worker_devices[device_id].
distribution_strategy.call_for_each_tower(
fn, distribution_strategy.worker_device_index)
```
Returns:
An index object, or the integer 0 if there is only a single tower.
"""
_require_cross_tower_context(self)
return self._worker_device_index()
def _worker_device_index(self):
raise NotImplementedError("must be implemented in descendants")
def configure(self, session_config=None):
"""Find the best configuration given a tensorflow session config."""
del session_config
# A note about the difference between the context managers
# `TowerContext` (defined here) and `_CurrentDistributionContext`
# (defined above) used by `DistributionStrategy.scope()`:
#
# * a TowerContext is only present during a `call_for_each_tower()`
# call (except during a `merge_run` call) and in such a scope it
# will be returned by calls to `get_tower_context()`. Implementers of new
# DistributionStrategy descendants will frequently also need to
# define a descendant of TowerContext, and are responsible for
# entering and exiting this context.
#
# * DistributionStrategy.scope() sets up a variable_creator scope that
# changes variable creation calls (e.g. to make mirrored
# variables). This is intended as an outer scope that users enter once
# around their model creation and graph definition. There is no
# anticipated need to define descendants of _CurrentDistributionContext.
# It sets the current DistributionStrategy for purposes of
# `get_distribution_strategy()` and `has_distribution_strategy()`
# and switches the thread mode to a "cross-tower context".
class TowerContext(object):
"""DistributionStrategy API inside a `call_for_each_tower()` call."""
def __init__(self, distribution_strategy, tower_id):
self._distribution_strategy = distribution_strategy
self._thread_context = _InTowerThreadMode(self)
self._tower_id = tower_id
def __enter__(self):
_push_per_thread_mode(self._thread_context)
def __exit__(self, exception_type, exception_value, traceback):
_pop_per_thread_mode()
def merge_call(self, merge_fn, *args, **kwargs):
"""Merge args across towers and run `merge_fn` in a cross-tower context.
This allows communication and coordination when there are multiple calls
to a model function triggered by a call to
`distribution.call_for_each_tower(model_fn, ...)`.
See `MirroredDistribution.call_for_each_tower()` for an explanation.
Otherwise, this is equivalent to:
```
distribution = get_distribution_strategy()
with cross-tower-context(distribution):
return merge_fn(distribution, *args, **kwargs)
```
Args:
      merge_fn: function that joins arguments from threads that are given as
          `PerDevice` values. It accepts a `DistributionStrategy` object as
          its first argument.
*args: positional per-thread arguments for `merge_fn`
**kwargs: keyword per-thread arguments for `merge_fn`.
Returns:
The return value of `merge_fn`, except for `PerDevice` values which are
unpacked.
"""
require_tower_context(self)
return self._merge_call(merge_fn, *args, **kwargs)
def _merge_call(self, merge_fn, *args, **kwargs):
"""Default implementation for single tower."""
_push_per_thread_mode( # thread-local, so not needed with multiple threads
_CrossTowerThreadMode(self._distribution_strategy))
try:
return merge_fn(self._distribution_strategy, *args, **kwargs)
finally:
_pop_per_thread_mode()
def tower_local_var_scope(self, reduce_method):
"""Alias for distribution_strategy.tower_local_var_scope()."""
return self._distribution_strategy.tower_local_var_scope(reduce_method)
@property
def is_single_tower(self):
"""Returns whether there is a single tower or multiple."""
require_tower_context(self)
return self._distribution_strategy.is_single_tower
@property
def num_towers(self):
"""Returns number of towers, for purposes of averaging across towers."""
return self._distribution_strategy.num_towers
@property
def tower_id(self):
"""Which tower is being defined, a number from 0 to `num_towers - 1`."""
require_tower_context(self)
return self._tower_id
@property
def distribution_strategy(self):
"""The current `DistributionStrategy` object."""
return self._distribution_strategy
@property
def device(self):
"""The device this tower is to be executed on, as a string."""
require_tower_context(self)
return device_util.current()
# TODO(josh11b): Implement `start_all_reduce(method, t)` that returns
# a function returning the result of reducing `t` across all
# towers. Most likely can be implemented in terms of `merge_call()`
# and `batch_reduce()`.
# ------------------------------------------------------------------------------
class _DefaultDistributionStrategy(DistributionStrategy):
"""Default `DistributionStrategy` if none is explicitly selected."""
def scope(self):
"""Context manager setting a variable creator and `self` as current."""
if has_distribution_strategy():
raise RuntimeError("Must not nest DistributionStrategy scopes.")
def creator(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs.pop("tower_local_reduce_method", None)
return next_creator(*args, **kwargs)
return _CurrentDistributionContext(
self, variable_scope.variable_creator_scope(creator))
def tower_local_var_scope(self, reduce_method):
"""Does not set to resource variables."""
def create_tower_local_variable(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs["trainable"] = False
return next_creator(*args, **kwargs)
_require_distribution_strategy_scope(self)
return variable_scope.variable_creator_scope(create_tower_local_variable)
def colocate_vars_with(self, colocate_with_variable):
"""Does not require `self.scope`."""
_require_distribution_strategy_scope(self)
return ops.colocate_with(colocate_with_variable)
def distribute_dataset(self, dataset_fn):
return self._call_dataset_fn(dataset_fn)
def _broadcast(self, tensor, destinations):
if destinations is None:
return tensor
else:
raise NotImplementedError("TODO")
def _call_for_each_tower(self, fn, *args, **kwargs):
# We don't run `fn` in multiple threads in _DefaultDistributionStrategy.
kwargs.pop("run_concurrently", None)
with TowerContext(self, tower_id=0):
return fn(*args, **kwargs)
def _reduce(self, method_string, value, destinations):
# TODO(josh11b): Use destinations?
del method_string, destinations
return value
def _update(self, var, fn, *args, **kwargs):
# TODO(josh11b): Figure out what we should be passing to UpdateContext()
# once that value is used for something.
with ops.colocate_with(var), UpdateContext(var):
return fn(var, *args, **kwargs)
def _update_non_slot(self, colocate_with, fn, *args, **kwargs):
# TODO(josh11b): Figure out what we should be passing to UpdateContext()
# once that value is used for something.
with ops.colocate_with(colocate_with), UpdateContext(colocate_with):
return fn(*args, **kwargs)
def read_var(self, tower_local_var):
return array_ops.identity(tower_local_var)
def _unwrap(self, distributed_value):
return [distributed_value]
@property
def is_single_tower(self):
return True
@property
def num_towers(self):
return 1
@property
def worker_devices(self):
raise RuntimeError(
"worker_devices() method unsupported by _DefaultDistributionStrategy.")
@property
def parameter_devices(self):
raise RuntimeError("parameter_devices() method unsupported by "
"_DefaultDistributionStrategy.")
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
def _worker_device_index(self):
raise RuntimeError("worker_device_index() method unsupported by "
"_DefaultDistributionStrategy.")
# ------------------------------------------------------------------------------
# Common operations
def increment_var(v, amount=1):
"""`v += amount`, distributed-aware version."""
def update(vu):
if isinstance(vu, resource_variable_ops.ResourceVariable):
return vu.assign_add(amount, read_value=False)
else:
return state_ops.assign_add(vu, amount)
def merge_fn(dist, vm):
return dist.group(dist.update(vm, update))
tower_context = get_tower_context()
return tower_context.merge_call(merge_fn, v)
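# Illustrative usage sketch (an assumption for clarity, not from the original
# module): `increment_var` is intended to be called from inside a tower
# function, e.g.
#
#   def tower_fn(inputs):
#     loss = model_fn(inputs)                  # hypothetical model code
#     counter_update = increment_var(global_step)
#     with ops.control_dependencies([counter_update]):
#       return array_ops.identity(loss)
#
#   distribution.call_for_each_tower(tower_fn, iterator.get_next())
#
# The `merge_call()` inside `increment_var` ensures the assign_add is applied
# in a cross-tower context, coordinated across towers, rather than issued
# independently by every tower thread.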
# ------------------------------------------------------------------------------
# Singletons
_default_distribution_strategy = _DefaultDistributionStrategy()
_default_tower_context = TowerContext(
_default_distribution_strategy, tower_id=0)
_default_tower_mode = _DefaultTowerThreadMode()
# ------------------------------------------------------------------------------
# We haven't yet implemented deserialization for DistributedVariables.
# So here we catch any attempts to deserialize variables
# when using distribution strategies.
# pylint: disable=protected-access
_original_from_proto = resource_variable_ops._from_proto_fn
def _from_proto_fn(v, import_scope=None):
if has_distribution_strategy():
    raise NotImplementedError(
        "Deserialization of variables is not yet supported when using "
        "distributed strategies.")
else:
return _original_from_proto(v, import_scope=import_scope)
resource_variable_ops._from_proto_fn = _from_proto_fn
# pylint: enable=protected-access
| apache-2.0 |
mxjl620/scikit-learn | examples/linear_model/plot_sgd_iris.py | 284 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
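# Note on the math behind `plot_hyperplane` (a sketch added for clarity): the
# one-vs-all hyperplane for class c is the set of points where
#   coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0
# so solving for x1 gives the `line` function above:
#   x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1]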
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
CI-WATER/tethys | tethys_services/migrations/0001_initial_20.py | 1 | 6102 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-17 14:04
from django.conf import settings
from django.db import migrations, models
import tethys_services.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DatasetService',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('engine', models.CharField(choices=[('tethys_dataset_services.engines.CkanDatasetEngine', 'CKAN'),
('tethys_dataset_services.engines.HydroShareDatasetEngine',
'HydroShare')],
default='tethys_dataset_services.engines.CkanDatasetEngine',
max_length=200)),
('endpoint', models.CharField(max_length=1024)),
('apikey', models.CharField(blank=True, max_length=100)),
('username', models.CharField(blank=True, max_length=100)),
('password', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'Dataset Service',
'verbose_name_plural': 'Dataset Services',
},
),
migrations.CreateModel(
name='SpatialDatasetService',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('engine', models.CharField(choices=[('tethys_dataset_services.engines.GeoServerSpatialDatasetEngine',
'GeoServer')],
default='tethys_dataset_services.engines.GeoServerSpatialDatasetEngine',
max_length=200)),
('endpoint', models.CharField(max_length=1024)),
('apikey', models.CharField(blank=True, max_length=100)),
('username', models.CharField(blank=True, max_length=100)),
('password', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'Spatial Dataset Service',
'verbose_name_plural': 'Spatial Dataset Services',
},
),
migrations.CreateModel(
name='WebProcessingService',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('endpoint', models.CharField(max_length=1024)),
('username', models.CharField(blank=True, max_length=100)),
('password', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'Web Processing Service',
'verbose_name_plural': 'Web Processing Services',
},
),
migrations.AlterField(
model_name='datasetservice',
name='endpoint',
field=models.CharField(max_length=1024,
validators=[tethys_services.models.validate_dataset_service_endpoint]),
),
migrations.AlterField(
model_name='spatialdatasetservice',
name='endpoint',
field=models.CharField(max_length=1024,
validators=[tethys_services.models.validate_spatial_dataset_service_endpoint]),
),
migrations.AlterField(
model_name='webprocessingservice',
name='endpoint',
field=models.CharField(max_length=1024, validators=[tethys_services.models.validate_wps_service_endpoint]),
),
migrations.AddField(
model_name='spatialdatasetservice',
name='public_endpoint',
field=models.CharField(blank=True, max_length=1024,
validators=[tethys_services.models.validate_spatial_dataset_service_endpoint]),
),
migrations.AddField(
model_name='datasetservice',
name='public_endpoint',
field=models.CharField(blank=True, max_length=1024,
validators=[tethys_services.models.validate_dataset_service_endpoint]),
),
migrations.AddField(
model_name='webprocessingservice',
name='public_endpoint',
field=models.CharField(blank=True, max_length=1024,
validators=[tethys_services.models.validate_wps_service_endpoint]),
),
migrations.CreateModel(
name='PersistentStoreService',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('host', models.CharField(default='localhost', max_length=255)),
('port', models.IntegerField(default=5435,
validators=[tethys_services.models.validate_persistent_store_port])),
('username', models.CharField(blank=True, max_length=100)),
('password', models.CharField(blank=True, max_length=100)),
('engine', models.CharField(choices=[('postgresql', 'PostgreSQL')], default='postgresql',
max_length=50)),
],
options={
'verbose_name': 'Persistent Store Service',
'verbose_name_plural': 'Persistent Store Services',
},
),
]
| bsd-2-clause |
lihuanghai/neon | neon/datasets/mnist.py | 7 | 6530 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
MNIST is a handwritten digit image dataset.
More info at: http://yann.lecun.com/exdb/mnist/
"""
import gzip
import logging
import numpy as np
import os
import struct
from neon.datasets.dataset import Dataset
from neon.util.compat import PY3, range
if PY3:
from urllib.parse import urljoin as basejoin
else:
from urllib import basejoin
logger = logging.getLogger(__name__)
class MNIST(Dataset):
"""
Sets up an MNIST dataset.
Attributes:
raw_base_url (str): where to find the source data
raw_train_input_gz (str): URL of the full path to raw train inputs
raw_train_target_gz (str): URL of the full path to raw train targets
raw_test_input_gz (str): URL of the full path to raw test inputs
raw_test_target_gz (str): URL of the full path to raw test targets
backend (neon.backends.Backend): backend used for this data
inputs (dict): structure housing the loaded train/test/validation
input data
targets (dict): structure housing the loaded train/test/validation
target data
Keyword Args:
repo_path (str, optional): where to locally host this dataset on disk
"""
raw_base_url = 'http://yann.lecun.com/exdb/mnist/'
raw_train_input_gz = basejoin(raw_base_url, 'train-images-idx3-ubyte.gz')
raw_train_target_gz = basejoin(raw_base_url, 'train-labels-idx1-ubyte.gz')
raw_test_input_gz = basejoin(raw_base_url, 't10k-images-idx3-ubyte.gz')
raw_test_target_gz = basejoin(raw_base_url, 't10k-labels-idx1-ubyte.gz')
def __init__(self, **kwargs):
self.num_test_sample = 10000
self.macro_batched = False
self.__dict__.update(kwargs)
def initialize(self):
pass
def read_image_file(self, fname, dtype=None):
"""
Carries out the actual reading of MNIST image files.
"""
with open(fname, 'rb') as f:
magic, num_images, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != 2051:
raise ValueError('invalid MNIST image file: ' + fname)
full_image = np.fromfile(f, dtype='uint8').reshape((num_images,
rows * cols))
if dtype is not None:
dtype = np.dtype(dtype)
full_image = full_image.astype(dtype)
full_image /= 255.
return full_image
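    # Format note (added sketch; values refer to the standard MNIST files):
    # the image files start with four big-endian int32 values -- the magic
    # number 2051, the image count, rows and columns -- followed by one
    # unsigned byte per pixel. For the training set the header is
    # (2051, 60000, 28, 28), so `full_image` ends up with shape (60000, 784).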
def read_label_file(self, fname):
"""
Carries out the actual reading of MNIST label files.
"""
with open(fname, 'rb') as f:
magic, num_labels = struct.unpack('>ii', f.read(8))
if magic != 2049:
                raise ValueError('invalid MNIST label file: ' + fname)
array = np.fromfile(f, dtype='uint8')
return array
def load(self, backend=None, experiment=None):
if self.inputs['train'] is not None:
return
if 'repo_path' in self.__dict__:
self.repo_path = os.path.expandvars(os.path.expanduser(
self.repo_path))
save_dir = os.path.join(self.repo_path,
self.__class__.__name__)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for url in (self.raw_train_input_gz, self.raw_train_target_gz,
self.raw_test_input_gz, self.raw_test_target_gz):
name = os.path.basename(url).rstrip('.gz')
repo_gz_file = os.path.join(save_dir, name + '.gz')
repo_file = repo_gz_file.rstrip('.gz')
if not os.path.exists(repo_file):
self.download_to_repo(url, save_dir)
with gzip.open(repo_gz_file, 'rb') as infile:
with open(repo_file, 'w') as outfile:
for line in infile:
outfile.write(line)
logger.info('loading: %s', name)
if 'images' in repo_file and 'train' in repo_file:
indat = self.read_image_file(repo_file, 'float32')
# flatten to 1D images
self.inputs['train'] = indat
elif 'images' in repo_file and 't10k' in repo_file:
indat = self.read_image_file(repo_file, 'float32')
self.inputs['test'] = indat[0:self.num_test_sample]
elif 'labels' in repo_file and 'train' in repo_file:
indat = self.read_label_file(repo_file)
# Prep a 1-hot label encoding
tmp = np.zeros((indat.shape[0], 10), dtype=np.float32)
for col in range(10):
tmp[:, col] = indat == col
self.targets['train'] = tmp
elif 'labels' in repo_file and 't10k' in repo_file:
indat = self.read_label_file(
repo_file)[0:self.num_test_sample]
tmp = np.zeros((self.num_test_sample, 10),
dtype=np.float32)
for col in range(10):
tmp[:, col] = indat == col
self.targets['test'] = tmp
else:
logger.error('problems loading: %s', name)
if 'sample_pct' in self.__dict__:
self.sample_training_data()
if hasattr(self, 'validation_pct'):
self.split_set(
self.validation_pct, from_set='train', to_set='validation')
self.format()
else:
raise AttributeError('repo_path not specified in config')
# TODO: try and download and read in directly?
| apache-2.0 |
slrbl/Intrusion-and-anomaly-detection-with-machine-learning | train.py | 1 | 2043 |
# About: Use supervised learning logistic regression classifier to predict intrusion/suspicious activities in http logs
# Author: walid.daboubi@gmail.com
# Version: 2.0 - 2022/08/14
import argparse
import sys
from sklearn import linear_model, tree
from helpers import *
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--training_data', help = 'Training data', required = True)
parser.add_argument('-v', '--testing_data', help = 'Testing data', required = True)
    parser.add_argument('-a', '--training_algorithm', help = '"lr" for logistic regression or "dt" for decision tree', required = True)
return vars(parser.parse_args())
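# Example invocation (an illustrative sketch; the file names are placeholders):
#
#   python train.py -t encoded_train_data.csv -v encoded_test_data.csv -a lr
#
# Both files are expected to be in whatever encoded format the imported
# `load_encoded_data` helper reads.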
args = get_args()
training_data = args['training_data']
testing_data = args['testing_data']
training_algorithm = args['training_algorithm']
# Get training features and labels
training_features, training_labels = load_encoded_data(training_data)
# Get testing features and labels
testing_features, testing_labels = load_encoded_data(testing_data)
# Logistic regression model
if training_algorithm == 'lr':
print("\n\n=-=-=-=-=-=-=- Logistic Regression Classifier -=-=-=-=-=-\n")
attack_classifier = linear_model.LogisticRegression()
# Decision tree model
elif training_algorithm == 'dt':
print("\n\n=-=-=-=-=-=-=- Decision Tree Classifier -=-=-=-=-=-=-=-\n")
attack_classifier = tree.DecisionTreeClassifier()
else:
    print('{} is not recognized as a training algorithm'.format(training_algorithm))
    sys.exit(1)
try:
# Train the model
attack_classifier.fit(training_features, training_labels)
# Predict
predictions = attack_classifier.predict(testing_features)
print("The precision of the detection model is: " + str(get_accuracy(testing_labels,predictions, 1)) + " %")
# Save the trained classifier
model_location = save_model(attack_classifier,'lr')
print("You model has been saved at {}".format(model_location))
except Exception as e:
print('Something went wrong training the model.\nExiting.', e)
sys.exit(1)
| mit |
GoogleCloudPlatform/ml-on-gcp | example_zoo/tensorflow/models/keras_imagenet_main/official/resnet/resnet_run_loop.py | 2 | 27886 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.
This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import os
# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf
from tensorflow.contrib.data.python.ops import threadpool
from official.resnet import resnet_model
from official.utils.flags import core as flags_core
from official.utils.export import export
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.resnet import imagenet_preprocessing
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset,
is_training,
batch_size,
shuffle_buffer,
parse_record_fn,
num_epochs=1,
dtype=tf.float32,
datasets_num_private_threads=None,
num_parallel_batches=1):
"""Given a Dataset with raw records, return an iterator over the records.
Args:
dataset: A Dataset representing raw records
is_training: A boolean denoting whether the input is for training.
batch_size: The number of samples per batch.
shuffle_buffer: The buffer size to use when shuffling records. A larger
value results in better randomness, but smaller values reduce startup
time and use less memory.
parse_record_fn: A function that takes a raw record and returns the
corresponding (image, label) pair.
num_epochs: The number of epochs to repeat the dataset.
dtype: Data type to use for images/features.
datasets_num_private_threads: Number of threads for a private
threadpool created for all datasets computation.
num_parallel_batches: Number of parallel batches for tf.data.
Returns:
Dataset of (image, label) pairs ready for iteration.
"""
# Prefetches a batch at a time to smooth out the time taken to load input
# files for shuffling and processing.
dataset = dataset.prefetch(buffer_size=batch_size)
if is_training:
# Shuffles records before repeating to respect epoch boundaries.
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
# Repeats the dataset for the number of epochs to train.
dataset = dataset.repeat(num_epochs)
# Parses the raw records into images and labels.
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
lambda value: parse_record_fn(value, is_training, dtype),
batch_size=batch_size,
num_parallel_batches=num_parallel_batches,
drop_remainder=False))
# Operations between the final prefetch and the get_next call to the iterator
# will happen synchronously during run time. We prefetch here again to
# background all of the above processing work and keep it out of the
# critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
# allows DistributionStrategies to adjust how many batches to fetch based
# on how many devices are present.
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
# Defines a specific size thread pool for tf.data operations.
if datasets_num_private_threads:
tf.logging.info('datasets_num_private_threads: %s',
datasets_num_private_threads)
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
datasets_num_private_threads,
display_name='input_pipeline_thread_pool'))
return dataset
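# Illustrative usage sketch (assumes TFRecord input files and a user-supplied
# `parse_record` function; neither name comes from this module):
#
#   def input_fn(is_training, filenames, batch_size, num_epochs):
#     dataset = tf.data.TFRecordDataset(filenames)
#     return process_record_dataset(
#         dataset=dataset,
#         is_training=is_training,
#         batch_size=batch_size,
#         shuffle_buffer=10000,
#         parse_record_fn=parse_record,
#         num_epochs=num_epochs)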
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
copy is still included. This used to find the upper throughput bound when
tunning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
# Synthetic input should be within [0, 255].
inputs = tf.truncated_normal(
[batch_size] + [height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random_uniform(
[batch_size],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
data = data.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
return data
return input_fn
def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
"""Serving input fn for raw jpeg images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
# Bounding box around the whole image.
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
height, width, num_channels = image_shape
image = imagenet_preprocessing.preprocess_image(
image_bytes, bbox, height, width, num_channels, is_training=False)
return image
image_bytes_list = tf.placeholder(
shape=[None], dtype=tf.string, name='input_tensor')
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
"""Override flags and set env_vars for performance.
These settings exist to test the difference between using stock settings
and manual tuning. It also shows some of the ENV_VARS that can be tweaked to
squeeze a few extra examples per second. These settings are defaulted to the
current platform of interest, which changes over time.
On systems with small numbers of cpu cores, e.g. under 8 logical cores,
setting up a gpu thread pool with `tf_gpu_thread_mode=gpu_private` may perform
poorly.
Args:
flags_obj: Current flags, which will be adjusted possibly overriding
what has been set by the user on the command-line.
"""
cpu_count = multiprocessing.cpu_count()
tf.logging.info('Logical CPU cores: %s', cpu_count)
# Sets up thread pool for each GPU for op scheduling.
per_gpu_thread_count = 1
total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
tf.logging.info('TF_GPU_THREAD_COUNT: %s', os.environ['TF_GPU_THREAD_COUNT'])
tf.logging.info('TF_GPU_THREAD_MODE: %s', os.environ['TF_GPU_THREAD_MODE'])
# Reduces general thread pool by number of threads used for GPU pool.
main_thread_count = cpu_count - total_gpu_thread_count
flags_obj.inter_op_parallelism_threads = main_thread_count
  # Sets the thread count for tf.data: logical cores minus the threads assigned
  # to the private GPU pool, minus 2 threads per GPU for event monitoring and
  # sending / receiving tensors.
num_monitoring_threads = 2 * flags_obj.num_gpus
flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
- num_monitoring_threads)
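# Worked example (numbers are hypothetical, added for clarity): on a machine
# with 96 logical cores and flags_obj.num_gpus == 8, the function above sets
#   total_gpu_thread_count       = 1 * 8       = 8
#   inter_op_parallelism_threads = 96 - 8      = 88
#   num_monitoring_threads       = 2 * 8       = 16
#   datasets_num_private_threads = 96 - 8 - 16 = 72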
################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
base_lr=0.1, warmup=False):
"""Get a learning rate that decays step-wise as training progresses.
Args:
batch_size: the number of examples processed in each training batch.
batch_denom: this value will be used to scale the base learning rate.
`0.1 * batch size` is divided by this number, such that when
batch_denom == batch_size, the initial learning rate will be 0.1.
num_images: total number of images that will be used for training.
boundary_epochs: list of ints representing the epochs at which we
decay the learning rate.
decay_rates: list of floats representing the decay rates to be used
for scaling the learning rate. It should have one more element
than `boundary_epochs`, and all elements should have the same type.
base_lr: Initial learning rate scaled based on batch_denom.
warmup: Run a 5 epoch warmup to the initial lr.
Returns:
Returns a function that takes a single argument - the number of batches
trained so far (global_step)- and returns the learning rate to be used
for training the next batch.
"""
initial_learning_rate = base_lr * batch_size / batch_denom
batches_per_epoch = num_images / batch_size
# Reduce the learning rate at certain epochs.
# CIFAR-10: divide by 10 at epoch 100, 150, and 200
# ImageNet: divide by 10 at epoch 30, 60, 80, and 90
boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
vals = [initial_learning_rate * decay for decay in decay_rates]
def learning_rate_fn(global_step):
"""Builds scaled learning rate function with 5 epoch warm up."""
lr = tf.train.piecewise_constant(global_step, boundaries, vals)
if warmup:
warmup_steps = int(batches_per_epoch * 5)
warmup_lr = (
initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
return lr
return learning_rate_fn
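# Worked example (values are hypothetical, added for clarity): with
# batch_size=256, batch_denom=256, num_images=1281167, base_lr=0.1,
# boundary_epochs=[30, 60] and decay_rates=[1, 0.1, 0.01], the initial rate is
# 0.1 * 256 / 256 = 0.1 and batches_per_epoch is roughly 5005. The returned
# function then yields 0.1 until the 30-epoch boundary (~150k steps), 0.01
# until the 60-epoch boundary (~300k steps), and 0.001 afterwards, with the
# additional linear 5-epoch warmup when warmup=True.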
def resnet_model_fn(features, labels, mode, model_class,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, resnet_version, loss_scale,
loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
fine_tune=False):
"""Shared functionality for different resnet model_fns.
Initializes the ResnetModel representing the model layers
and uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model_class: a class representing a TensorFlow model that has a __call__
function. We assume here that this is a subclass of ResnetModel.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
resnet_version: Integer representing which version of the ResNet network to
use. See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability. A detailed
summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
fine_tune: If True only train the dense layers(final layers).
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
tf.summary.image('images', features, max_outputs=6)
# Checks that features/images have same data type being used for calculations.
assert features.dtype == dtype
model = model_class(resnet_size, data_format, resnet_version=resnet_version,
dtype=dtype)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
# This acts as a no-op if the logits are already in fp32 (provided logits are
  # not a SparseTensor). If dtype is low precision, logits must be cast to
# fp32 for numerical stability.
logits = tf.cast(logits, tf.float32)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
# Return the predictions and the specification for serving a SavedModel
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name):
return 'batch_normalization' not in name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
# Add weight decay to the loss.
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
if loss_filter_fn(v.name)])
tf.summary.scalar('l2_loss', l2_loss)
loss = cross_entropy + l2_loss
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
def _dense_grad_filter(gvs):
"""Only apply gradient updates to the final layer.
This function is used for fine tuning.
Args:
gvs: list of tuples with gradients and variable info
Returns:
filtered gradients so that only the dense layer remains
"""
return [(g, v) for g, v in gvs if 'dense' in v.name]
if loss_scale != 1:
# When computing fp16 gradients, often intermediate tensor values are
# so small, they underflow to 0. To avoid this, we multiply the loss by
# loss_scale to make these tensor values loss_scale times bigger.
scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
if fine_tune:
scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)
# Once the gradient computation is complete we can scale the gradients
# back to the correct scale before passing them to the optimizer.
unscaled_grad_vars = [(grad / loss_scale, var)
for grad, var in scaled_grad_vars]
minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
else:
grad_vars = optimizer.compute_gradients(loss)
if fine_tune:
grad_vars = _dense_grad_filter(grad_vars)
minimize_op = optimizer.apply_gradients(grad_vars, global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops)
else:
train_op = None
accuracy = tf.metrics.accuracy(labels, predictions['classes'])
accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
targets=labels,
k=5,
name='top_5_op'))
metrics = {'accuracy': accuracy,
'accuracy_top_5': accuracy_top_5}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
tf.summary.scalar('train_accuracy', accuracy[1])
tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
def resnet_main(
flags_obj, model_function, input_function, dataset_name, shape=None):
"""Shared main loop for ResNet Models.
Args:
flags_obj: An object containing parsed flags. See define_resnet_flags()
for details.
model_function: the function that instantiates the Model and builds the
ops for train/eval. This will be passed directly into the estimator.
input_function: the function that processes the dataset and returns a
dataset that the estimator can train on. This will be wrapped with
all the relevant flags for running and passed to estimator.
dataset_name: the name of the dataset for training and evaluation. This is
used for logging purpose.
shape: list of ints representing the shape of the images used for training.
This is only used if flags_obj.export_dir is passed.
Returns:
Dict of results of the run.
"""
model_helpers.apply_clean(flags.FLAGS)
# Ensures flag override logic is only executed if explicitly triggered.
if flags_obj.tf_gpu_thread_mode:
override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)
# Creates session config. allow_soft_placement = True, is required for
# multi-GPU and is not harmful for other modes.
session_config = tf.ConfigProto(
inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
allow_soft_placement=True)
distribution_strategy = distribution_utils.get_distribution_strategy(
flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
# Creates a `RunConfig` that checkpoints every 24 hours which essentially
# results in checkpoints determined only by `epochs_between_evals`.
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy,
session_config=session_config,
save_checkpoints_secs=60*60*24)
# Initializes model with all but the dense layer from pretrained ResNet.
if flags_obj.pretrained_model_checkpoint_path is not None:
warm_start_settings = tf.estimator.WarmStartSettings(
flags_obj.pretrained_model_checkpoint_path,
vars_to_warm_start='^(?!.*dense)')
else:
warm_start_settings = None
classifier = tf.estimator.Estimator(
model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
warm_start_from=warm_start_settings, params={
'resnet_size': int(flags_obj.resnet_size),
'data_format': flags_obj.data_format,
'batch_size': flags_obj.batch_size,
'resnet_version': int(flags_obj.resnet_version),
'loss_scale': flags_core.get_loss_scale(flags_obj),
'dtype': flags_core.get_tf_dtype(flags_obj),
'fine_tune': flags_obj.fine_tune
})
run_params = {
'batch_size': flags_obj.batch_size,
'dtype': flags_core.get_tf_dtype(flags_obj),
'resnet_size': flags_obj.resnet_size,
'resnet_version': flags_obj.resnet_version,
'synthetic_data': flags_obj.use_synthetic_data,
'train_epochs': flags_obj.train_epochs,
}
if flags_obj.use_synthetic_data:
dataset_name = dataset_name + '-synthetic'
benchmark_logger = logger.get_benchmark_logger()
benchmark_logger.log_run_info('resnet', dataset_name, run_params,
test_id=flags_obj.benchmark_test_id)
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks,
model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size)
def input_fn_train(num_epochs):
return input_function(
is_training=True,
data_dir=flags_obj.data_dir,
batch_size=distribution_utils.per_device_batch_size(
flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
num_epochs=num_epochs,
dtype=flags_core.get_tf_dtype(flags_obj),
datasets_num_private_threads=flags_obj.datasets_num_private_threads,
num_parallel_batches=flags_obj.datasets_num_parallel_batches)
def input_fn_eval():
return input_function(
is_training=False,
data_dir=flags_obj.data_dir,
batch_size=distribution_utils.per_device_batch_size(
flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
num_epochs=1,
dtype=flags_core.get_tf_dtype(flags_obj))
if flags_obj.eval_only or not flags_obj.train_epochs:
# If --eval_only is set, perform a single loop with zero train epochs.
schedule, n_loops = [0], 1
else:
# Compute the number of times to loop while training. All but the last
# pass will train for `epochs_between_evals` epochs, while the last will
# train for the number needed to reach `training_epochs`. For instance if
# train_epochs = 25 and epochs_between_evals = 10
# schedule will be set to [10, 10, 5]. That is to say, the loop will:
# Train for 10 epochs and then evaluate.
# Train for another 10 epochs and then evaluate.
# Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals)
schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1]) # over counting.
for cycle_index, num_train_epochs in enumerate(schedule):
tf.logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops))
if num_train_epochs:
classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
hooks=train_hooks, max_steps=flags_obj.max_train_steps)
tf.logging.info('Starting to evaluate.')
# flags_obj.max_train_steps is generally associated with testing and
# profiling. As a result it is frequently called with synthetic data, which
# will iterate forever. Passing steps=flags_obj.max_train_steps allows the
# eval (which is generally unimportant in those circumstances) to terminate.
# Note that eval will run for max_train_steps each loop, regardless of the
# global_step count.
eval_results = classifier.evaluate(input_fn=input_fn_eval,
steps=flags_obj.max_train_steps)
benchmark_logger.log_evaluation_result(eval_results)
if model_helpers.past_stop_threshold(
flags_obj.stop_threshold, eval_results['accuracy']):
break
if flags_obj.export_dir is not None:
# Exports a saved model for the given classifier.
export_dtype = flags_core.get_tf_dtype(flags_obj)
if flags_obj.image_bytes_as_serving_input:
input_receiver_fn = functools.partial(
image_bytes_serving_input_fn, shape, dtype=export_dtype)
else:
input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
strip_default_attrs=True)
return eval_results
def define_resnet_flags(resnet_size_choices=None):
"""Add flags and validators for ResNet."""
flags_core.define_base()
flags_core.define_performance(num_parallel_calls=False,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
datasets_num_parallel_batches=True)
flags_core.define_image()
flags_core.define_benchmark()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_enum(
name='resnet_version', short_name='rv', default='1',
enum_values=['1', '2'],
help=flags_core.help_wrap(
'Version of ResNet. (1 or 2) See README.md for details.'))
flags.DEFINE_bool(
name='fine_tune', short_name='ft', default=False,
help=flags_core.help_wrap(
'If True do not train any parameters except for the final layer.'))
flags.DEFINE_string(
name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
help=flags_core.help_wrap(
'If not None initialize all the network except the final layer with '
'these values'))
flags.DEFINE_boolean(
name='eval_only', default=False,
help=flags_core.help_wrap('Skip training and only perform evaluation on '
'the latest checkpoint.'))
flags.DEFINE_boolean(
name='image_bytes_as_serving_input', default=False,
help=flags_core.help_wrap(
'If True exports savedmodel with serving signature that accepts '
'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
'represents the image. The former is easier to use for serving at '
'the expense of image resize/cropping being done as part of model '
'inference. Note, this flag only applies to ImageNet and cannot '
'be used for CIFAR.'))
flags.DEFINE_boolean(
name='turn_off_distribution_strategy', default=False,
help=flags_core.help_wrap('Set to True to not use distribution '
'strategies.'))
choice_kwargs = dict(
name='resnet_size', short_name='rs', default='50',
help=flags_core.help_wrap('The size of the ResNet model to use.'))
if resnet_size_choices is None:
flags.DEFINE_string(**choice_kwargs)
else:
flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
| apache-2.0 |
pkruskal/scikit-learn | examples/cluster/plot_cluster_iris.py | 347 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The first plot shows what a K-means algorithm yields with three
clusters. The second shows the effect of a bad initialization on
the classification process: by setting n_init to only 1 (default
is 10), the number of times the algorithm is run with different
centroid seeds is reduced.
The next plot shows the result of using eight clusters, and the
final plot shows the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/cluster/plot_cluster_iris.py | 347 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The first plot shows what a K-means algorithm yields with three
clusters. The second shows the effect of a bad initialization on
the classification process: by setting n_init to only 1 (default
is 10), the number of times the algorithm is run with different
centroid seeds is reduced.
The next plot shows the result of using eight clusters, and the
final plot shows the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
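# Illustrative follow-up (not part of the original example): each fitted
# KMeans object in `estimators` exposes its results as attributes, e.g.
#
#     est.cluster_centers_   # array of shape (n_clusters, 4): the centroids
#     est.inertia_           # sum of squared distances to the closest centroid
#
# which can be printed or inspected after the loop above.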
| bsd-3-clause |
jmouriz/sanaviron | sanaviron/src/3rd/pycha/tests/chart.py | 3 | 9618 | # Copyright(c) 2007-2009 by Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of PyCha.
#
# PyCha is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyCha. If not, see <http://www.gnu.org/licenses/>.
import unittest
import cairo
import pycha.chart
class FunctionsTests(unittest.TestCase):
def test_uniqueIndices(self):
arr = (range(10), range(5), range(20), range(30))
self.assertEqual(pycha.chart.uniqueIndices(arr), range(30))
arr = (range(30), range(20), range(5), range(10))
self.assertEqual(pycha.chart.uniqueIndices(arr), range(30))
arr = (range(4), )
self.assertEqual(pycha.chart.uniqueIndices(arr), range(4))
arr = (range(0), )
self.assertEqual(pycha.chart.uniqueIndices(arr), [])
class AreaTests(unittest.TestCase):
def test_area(self):
area = pycha.chart.Area(10, 20, 100, 300)
self.assertEqual(area.x, 10)
self.assertEqual(area.y, 20)
self.assertEqual(area.w, 100)
self.assertEqual(area.h, 300)
self.assertEqual(area.origin, 0.0)
msg = "<pycha.chart.Area@(10.00, 20.00) 100.00 x 300.00 Origin: 0.00>"
self.assertEqual(str(area), msg)
class OptionTests(unittest.TestCase):
def test_options(self):
opt = pycha.chart.Option(a=1, b=2, c=3)
self.assertEqual(opt.a, opt['a'])
self.assertEqual(opt.b, 2)
self.assertEqual(opt['c'], 3)
opt = pycha.chart.Option({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(opt.a, opt['a'])
self.assertEqual(opt.b, 2)
self.assertEqual(opt['c'], 3)
def test_merge(self):
opt = pycha.chart.Option(a=1, b=2,
c=pycha.chart.Option(d=4, e=5))
self.assertEqual(opt.c.d, 4)
opt.merge(dict(c=pycha.chart.Option(d=7, e=8, f=9)))
self.assertEqual(opt.c.d, 7)
# new attributes not present in original option are not merged
self.assertRaises(AttributeError, getattr, opt.c, 'f')
opt.merge(pycha.chart.Option(a=10, b=20))
self.assertEqual(opt.a, 10)
self.assertEqual(opt.b, 20)
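        # In short: Option.merge only updates keys that already exist on the
        # target, so merging Option(d=7, e=8, f=9) into an existing
        # Option(d=4, e=5) updates d and e but never creates f (hence the
        # AttributeError asserted above).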
class ChartTests(unittest.TestCase):
def test_init(self):
ch = pycha.chart.Chart(None)
self.assertEqual(ch.resetFlag, False)
self.assertEqual(ch.datasets, [])
self.assertEqual(ch.area, None)
self.assertEqual(ch.minxval, None)
self.assertEqual(ch.maxxval, None)
self.assertEqual(ch.minyval, None)
self.assertEqual(ch.maxyval, None)
self.assertEqual(ch.xscale, 1.0)
self.assertEqual(ch.yscale, 1.0)
self.assertEqual(ch.xrange, None)
self.assertEqual(ch.yrange, None)
self.assertEqual(ch.xticks, [])
self.assertEqual(ch.yticks, [])
self.assertEqual(ch.options, pycha.chart.DEFAULT_OPTIONS)
def test_datasets(self):
ch = pycha.chart.Chart(None)
d1 = ('dataset1', ([0, 0], [1, 2], [2, 1.5]))
d2 = ('dataset2', ([0, 1], [1, 2], [2, 2.4]))
d3 = ('dataset3', ([0, 4], [1, 3], [2, 0.5]))
ch.addDataset((d1, d2, d3))
self.assertEqual(ch._getDatasetsKeys(),
['dataset1', 'dataset2', 'dataset3'])
self.assertEqual(ch._getDatasetsValues(),
[d1[1], d2[1], d3[1]])
def test_options(self):
ch = pycha.chart.Chart(None)
opt = pycha.chart.Option(shouldFill=False)
ch.setOptions(opt)
self.assertEqual(ch.options.shouldFill, False)
opt = {'pieRadius': 0.8}
ch.setOptions(opt)
self.assertEqual(ch.options.pieRadius, 0.8)
def test_reset(self):
ch = pycha.chart.Chart(None, options={'shouldFill': False})
self.assertEqual(ch.resetFlag, False)
self.assertEqual(ch.options.shouldFill, False)
dataset = (('dataset1', ([0, 1], [1, 1])), )
ch.addDataset(dataset)
self.assertEqual(ch._getDatasetsKeys(), ['dataset1'])
ch.reset()
defaultFill = pycha.chart.DEFAULT_OPTIONS.shouldFill
self.assertEqual(ch.options.shouldFill, defaultFill)
self.assertEqual(ch.datasets, [])
self.assertEqual(ch.resetFlag, True)
def test_colorscheme(self):
options = {'colorScheme': {'name': 'gradient',
'args': {'initialColor': '#000000'}}}
ch = pycha.chart.Chart(None, options)
dataset = (('dataset1', ([0, 1], [1, 1])), )
ch.addDataset(dataset)
ch._setColorscheme()
self.assert_(isinstance(ch.colorScheme, dict))
self.assertEqual(ch.colorScheme, {'dataset1': (0.0, 0.0, 0.0)})
options = {'colorScheme': {'name': 'foo'}}
ch = pycha.chart.Chart(None, options)
ch.addDataset(dataset)
self.assertRaises(ValueError, ch._setColorscheme)
def test_updateXY(self):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
opt = {'padding': dict(left=10, right=10, top=10, bottom=10)}
dataset = (
('dataset1', ([0, 1], [1, 1], [2, 3])),
('dataset2', ([0, 2], [1, 0], [3, 4])),
)
ch = pycha.chart.Chart(surface, opt)
ch.addDataset(dataset)
ch._updateXY()
self.assertEqual((ch.area.x, ch.area.y, ch.area.w, ch.area.h),
(10, 10, 480, 480))
self.assertEqual(ch.minxval, 0.0)
self.assertEqual(ch.maxxval, 3)
self.assertEqual(ch.xrange, 3)
self.assertEqual(ch.xscale, 1 / 3.0)
self.assertEqual(ch.minyval, 0)
self.assertEqual(ch.maxyval, 4)
self.assertEqual(ch.yrange, 4)
self.assertEqual(ch.yscale, 1 / 4.0)
# TODO: test with different options (axis.range, ...)
def test_updateTicks(self):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
opt = {'padding': dict(left=10, right=10, top=10, bottom=10)}
dataset = (
('dataset1', ([0, 1], [1, 1], [2, 3])),
('dataset2', ([0, 2], [1, 0], [3, 4])),
)
ch = pycha.chart.Chart(surface, opt)
ch.addDataset(dataset)
ch._updateXY()
ch._updateTicks()
xticks = [(0.0, 0), (1 / 3.0, 1), (2 / 3.0, 2)]
for i in range(len(xticks)):
self.assertAlmostEqual(ch.xticks[i][0], xticks[i][0], 4)
self.assertAlmostEqual(ch.xticks[i][1], xticks[i][1], 4)
yticks = [(1 - 0.1 * i, 0.4 * i)
for i in range(ch.options.axis.y.tickCount + 1)]
self.assertEqual(len(ch.yticks), len(yticks))
for i in range(len(yticks)):
self.assertAlmostEqual(ch.yticks[i][0], yticks[i][0], 4)
self.assertAlmostEqual(ch.yticks[i][1], yticks[i][1], 4)
def _test_updateExplicitTicks(self):
"""Test for bug #7"""
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
yticks = [dict(v=i, label=str(i)) for i in range(0, 3)]
opt = {'axis': {'y': {'ticks': yticks}}}
dataset = (
('dataset1', ([0, 1], [1, 1], [2, 3])),
)
ch = pycha.chart.Chart(surface, opt)
ch.addDataset(dataset)
ch._updateXY()
ch._updateTicks()
self.assertAlmostEqual(ch.yticks[0][0], 1.0, 4)
self.assertAlmostEqual(ch.yticks[1][0], 2 / 3.0, 4)
self.assertAlmostEqual(ch.yticks[2][0], 1 / 3.0, 4)
def test_abstractChart(self):
ch = pycha.chart.Chart(None)
self.assertRaises(NotImplementedError, ch._updateChart)
self.assertRaises(NotImplementedError, ch._renderChart, None)
def test_range(self):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
opt = {'axis': {'x': {'range': (1, 10)}, 'y': {'range': (1.0, 10.0)}}}
ch = pycha.chart.Chart(surface, opt)
dataset = (
('dataset1', ([0, 1], [1, 1], [2, 3])),
)
ch.addDataset(dataset)
ch._updateXY()
self.assertAlmostEqual(ch.xrange, 9, 4)
self.assertAlmostEqual(ch.yrange, 9, 4)
self.assertAlmostEqual(ch.xscale, 0.1111, 4)
self.assertAlmostEqual(ch.yscale, 0.1111, 4)
def test_interval(self):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
opt = {'axis': {'y': {'interval': 2.5}}}
ch = pycha.chart.Chart(surface, opt)
dataset = (
('dataset1', ([0, 1], [1, 4], [2, 10])),
)
ch.addDataset(dataset)
ch._updateXY()
ch._updateTicks()
yticks = ((0.75, 2.5), (0.5, 5.0),
(0.25, 7.5), (0.0, 10.0))
self.assertEqual(len(yticks), len(ch.yticks))
for i, (pos, label) in enumerate(yticks):
tick = ch.yticks[i]
self.assertAlmostEqual(tick[0], pos, 2)
self.assertAlmostEqual(tick[1], label, 2)
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(FunctionsTests),
unittest.makeSuite(AreaTests),
unittest.makeSuite(OptionTests),
unittest.makeSuite(ChartTests),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| apache-2.0 |
mxjl620/scikit-learn | sklearn/__check_build/__init__.py | 342 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
mxjl620/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 228 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
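# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module; the data, the clusterer and
# the number of clusters below are arbitrary and only illustrate the calls):
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#
#     X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
#     labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)
#     silhouette_score(X, labels)      # single float: mean over all samples
#     silhouette_samples(X, labels)    # array of per-sample coefficients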
| bsd-3-clause |
andrewcmyers/tensorflow | tensorflow/examples/learn/mnist.py | 28 | 4770 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows the description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
N_DIGITS = 10 # Number of digits.
X_FEATURE = 'x' # Name of the input feature.
def conv_model(features, labels, mode):
"""2-layer convolution model."""
  # Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being
  # image width and height, and the final dimension being the number of color channels.
feature = tf.reshape(features[X_FEATURE], [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = tf.layers.conv2d(
feature,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
h_pool1 = tf.layers.max_pooling2d(
h_conv1, pool_size=2, strides=2, padding='same')
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = tf.layers.conv2d(
h_pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu)
h_pool2 = tf.layers.max_pooling2d(
h_conv2, pool_size=2, strides=2, padding='same')
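  # After two rounds of 2x2 max pooling the 28x28 input maps are reduced to
  # 7x7, so each image flattens to 7 * 7 * 64 values below.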
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)
if mode == tf.estimator.ModeKeys.TRAIN:
h_fc1 = tf.layers.dropout(h_fc1, rate=0.5)
# Compute logits (1 per class) and compute loss.
logits = tf.layers.dense(h_fc1, N_DIGITS, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
onehot_labels = tf.one_hot(tf.cast(labels, tf.int32), N_DIGITS, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_args):
### Download and load MNIST dataset.
mnist = tf.contrib.learn.datasets.DATASETS['mnist']('/tmp/mnist')
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: mnist.train.images},
y=mnist.train.labels.astype(np.int32),
batch_size=100,
num_epochs=None,
shuffle=True)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: mnist.test.images},  # evaluate on the held-out test split
      y=mnist.test.labels.astype(np.int32),
num_epochs=1,
shuffle=False)
### Linear classifier.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=mnist.train.images.shape[1:])]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns, n_classes=N_DIGITS)
classifier.train(input_fn=train_input_fn, steps=200)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (LinearClassifier): {0:f}'.format(scores['accuracy']))
### Convolutional network
classifier = tf.estimator.Estimator(model_fn=conv_model)
classifier.train(input_fn=train_input_fn, steps=200)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (conv_model): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
yonglehou/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 228 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check for a bug triggered when a connectivity matrix that
    # does not support item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
    # tree building should stop once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
mxjl620/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 255 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
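    # One possible solution for the TASKs above, kept as comments so the
    # exercise stays a skeleton (parameter values are illustrative only):
    #
    # pipeline = Pipeline([
    #     ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    #     ('clf', LinearSVC(C=1000)),
    # ])
    # parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    # grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    # grid_search.fit(docs_train, y_train)
    # for params, mean_score, scores in grid_search.grid_scores_:
    #     print("%0.3f (+/-%0.03f) for %r"
    #           % (mean_score, scores.std() * 2, params))
    # y_predicted = grid_search.predict(docs_test)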
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
FakeNewsChallenge/fnc-1-baseline | feature_engineering.py | 4 | 6253 | import os
import re
import nltk
import numpy as np
from sklearn import feature_extraction
from tqdm import tqdm
_wnl = nltk.WordNetLemmatizer()
def normalize_word(w):
return _wnl.lemmatize(w).lower()
def get_tokenized_lemmas(s):
return [normalize_word(t) for t in nltk.word_tokenize(s)]
def clean(s):
# Cleans a string: Lowercasing, trimming, removing non-alphanumeric
return " ".join(re.findall(r'\w+', s, flags=re.UNICODE)).lower()
def remove_stopwords(l):
# Removes stopwords from a list of tokens
return [w for w in l if w not in feature_extraction.text.ENGLISH_STOP_WORDS]
def gen_or_load_feats(feat_fn, headlines, bodies, feature_file):
if not os.path.isfile(feature_file):
feats = feat_fn(headlines, bodies)
np.save(feature_file, feats)
return np.load(feature_file)
def word_overlap_features(headlines, bodies):
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
clean_headline = clean(headline)
clean_body = clean(body)
clean_headline = get_tokenized_lemmas(clean_headline)
clean_body = get_tokenized_lemmas(clean_body)
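        # Jaccard-style overlap: shared lemmas over the size of the union of
        # headline and body lemma sets.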
features = [
len(set(clean_headline).intersection(clean_body)) / float(len(set(clean_headline).union(clean_body)))]
X.append(features)
return X
def refuting_features(headlines, bodies):
_refuting_words = [
'fake',
'fraud',
'hoax',
'false',
'deny', 'denies',
# 'refute',
'not',
'despite',
'nope',
'doubt', 'doubts',
'bogus',
'debunk',
'pranks',
'retract'
]
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
clean_headline = clean(headline)
clean_headline = get_tokenized_lemmas(clean_headline)
features = [1 if word in clean_headline else 0 for word in _refuting_words]
X.append(features)
return X
def polarity_features(headlines, bodies):
_refuting_words = [
'fake',
'fraud',
'hoax',
'false',
'deny', 'denies',
'not',
'despite',
'nope',
'doubt', 'doubts',
'bogus',
'debunk',
'pranks',
'retract'
]
def calculate_polarity(text):
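        # "Polarity" here is simply the parity (odd/even) of the number of
        # refuting-word occurrences in the text.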
tokens = get_tokenized_lemmas(text)
return sum([t in _refuting_words for t in tokens]) % 2
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
clean_headline = clean(headline)
clean_body = clean(body)
features = []
features.append(calculate_polarity(clean_headline))
features.append(calculate_polarity(clean_body))
X.append(features)
return np.array(X)
def ngrams(input, n):
input = input.split(' ')
output = []
for i in range(len(input) - n + 1):
output.append(input[i:i + n])
return output
def chargrams(input, n):
output = []
for i in range(len(input) - n + 1):
output.append(input[i:i + n])
return output
def append_chargrams(features, text_headline, text_body, size):
grams = [' '.join(x) for x in chargrams(" ".join(remove_stopwords(text_headline.split())), size)]
grams_hits = 0
grams_early_hits = 0
grams_first_hits = 0
for gram in grams:
if gram in text_body:
grams_hits += 1
if gram in text_body[:255]:
grams_early_hits += 1
if gram in text_body[:100]:
grams_first_hits += 1
features.append(grams_hits)
features.append(grams_early_hits)
features.append(grams_first_hits)
return features
def append_ngrams(features, text_headline, text_body, size):
grams = [' '.join(x) for x in ngrams(text_headline, size)]
grams_hits = 0
grams_early_hits = 0
for gram in grams:
if gram in text_body:
grams_hits += 1
if gram in text_body[:255]:
grams_early_hits += 1
features.append(grams_hits)
features.append(grams_early_hits)
return features
def hand_features(headlines, bodies):
def binary_co_occurence(headline, body):
# Count how many times a token in the title
# appears in the body text.
bin_count = 0
bin_count_early = 0
for headline_token in clean(headline).split(" "):
if headline_token in clean(body):
bin_count += 1
if headline_token in clean(body)[:255]:
bin_count_early += 1
return [bin_count, bin_count_early]
def binary_co_occurence_stops(headline, body):
# Count how many times a token in the title
# appears in the body text. Stopwords in the title
# are ignored.
bin_count = 0
bin_count_early = 0
for headline_token in remove_stopwords(clean(headline).split(" ")):
if headline_token in clean(body):
bin_count += 1
bin_count_early += 1
return [bin_count, bin_count_early]
def count_grams(headline, body):
# Count how many times an n-gram of the title
# appears in the entire body, and intro paragraph
clean_body = clean(body)
clean_headline = clean(headline)
features = []
features = append_chargrams(features, clean_headline, clean_body, 2)
features = append_chargrams(features, clean_headline, clean_body, 8)
features = append_chargrams(features, clean_headline, clean_body, 4)
features = append_chargrams(features, clean_headline, clean_body, 16)
features = append_ngrams(features, clean_headline, clean_body, 2)
features = append_ngrams(features, clean_headline, clean_body, 3)
features = append_ngrams(features, clean_headline, clean_body, 4)
features = append_ngrams(features, clean_headline, clean_body, 5)
features = append_ngrams(features, clean_headline, clean_body, 6)
return features
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
X.append(binary_co_occurence(headline, body)
+ binary_co_occurence_stops(headline, body)
+ count_grams(headline, body))
return X
| apache-2.0 |
yonglehou/scikit-learn | examples/svm/plot_svm_scale_c.py | 222 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
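# Reading the scaled axis: with n_samples = 100 and train_size = 0.5, the
# '1/n_samples' panel plots a raw C of 0.01 at 0.01 * (100 * 0.5) = 0.5.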
| bsd-3-clause |
pkruskal/scikit-learn | examples/svm/plot_svm_scale_c.py | 222 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
GoogleCloudPlatform/public-datasets-pipelines | datasets/new_york_trees/pipelines/tree_census_2005/tree_census_2005_dag.py | 1 | 15071 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="new_york_trees.tree_census_2005",
default_args=default_args,
max_active_runs=1,
schedule_interval="@daily",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
tree_census_2005_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="tree_census_2005_transform_csv",
startup_timeout_seconds=600,
name="tree_census_2005",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.new_york_trees.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://data.cityofnewyork.us/api/views/29bw-z7pj/rows.csv",
"SOURCE_FILE": "files/data.csv",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/new_york_trees/tree_census_2005/data_output.csv",
"PIPELINE_NAME": "tree_census_2005",
"CSV_HEADERS": '["objectid","cen_year","tree_dbh","tree_loc","pit_type","soil_lvl","status","spc_latin", "spc_common","vert_other","vert_pgrd","vert_tgrd","vert_wall","horz_blck","horz_grate", "horz_plant","horz_other","sidw_crack","sidw_raise","wire_htap","wire_prime", "wire_2nd","wire_other","inf_canopy","inf_guard","inf_wires","inf_paving","inf_outlet", "inf_shoes","inf_lights","inf_other","trunk_dmg","zipcode","zip_city","cb_num","borocode", "boroname","cncldist","st_assem","st_senate","nta","nta_name","boro_ct","x_sp","y_sp", "objectid_1","location_1","state","latitude","longitude","census_tract","bin","bbl","address"]',
"RENAME_MAPPINGS": '{"OBJECTID":"objectid","Location 1":"location_1","census tract":"census_tract"}',
"INTEGER_STRING_COL": '["cb_num", "borocode", "cncldist", "st_assem", "st_senate", "boro_ct","x_sp","y_sp", "objectid_1","census_tract","bin","bbl"]',
},
resources={
"request_memory": "3G",
"request_cpu": "1",
"request_ephemeral_storage": "5G",
},
)
# Task to load CSV data to a BigQuery table
load_tree_census_2005_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_tree_census_2005_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/new_york_trees/tree_census_2005/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="new_york_trees.tree_census_2005",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"name": "objectid",
"type": "integer",
"description": "",
"mode": "required",
},
{
"name": "cen_year",
"type": "integer",
"description": "This is the year the tree was inventoried in. Data collection for the 2005 census spanned multiple seasons. Data is in YYYY format.",
"mode": "nullable",
},
{
"name": "tree_dbh",
"type": "integer",
"description": "The diameter of the tree in whole inches, measured at breast height. (4.5 feet from the ground.)",
"mode": "nullable",
},
{
"name": "tree_loc",
"type": "string",
"description": "Establishes the location of the tree in relation to the address provided",
"mode": "nullable",
},
{
"name": "pit_type",
"type": "string",
"description": "",
"mode": "nullable",
},
{
"name": "soil_lvl",
"type": "string",
"description": "",
"mode": "nullable",
},
{
"name": "status",
"type": "string",
"description": "Excellent: full, well balanced crown and limb structure; leaves normal size color; no dead or broken branches; trunk solid; bark intact. Good: crown uneven or misshapen; some mechanical damage to bark or trunk; some signs of insects or disease; leaves somewhat below normal size and quantity; some dead or broken branches (less than half of the tree). Poor: large dead limbs with over one- half of the tree already dead or removed; large cavities; drastic deformities; leaves significantly below normal size and quantity; severe insect or disease damage. Dead: dead tree; leaves absent; twigs brittle. Shaft: all branches removed; trunk left standing; sprouts may or may not be evident. Stump: stump shorter than breast height; leaves entirely absent or present only on stump sprouts Empty pit: Pit contains exposed soil and no tree",
"mode": "nullable",
},
{
"name": "spc_latin",
"type": "string",
"description": "The scientific name of the species.",
"mode": "nullable",
},
{
"name": "spc_common",
"type": "string",
"description": "The common name of the species.",
"mode": "nullable",
},
{
"name": "vert_other",
"type": "boolean",
"description": "Other Vertical Treatment Present",
"mode": "nullable",
},
{
"name": "vert_pgrd",
"type": "boolean",
"description": "Perimeter guard present",
"mode": "nullable",
},
{
"name": "vert_tgrd",
"type": "boolean",
"description": "Tall guard present",
"mode": "nullable",
},
{
"name": "vert_wall",
"type": "boolean",
"description": "Walled tree well present",
"mode": "nullable",
},
{
"name": "horz_blck",
"type": "boolean",
"description": "Block pavers present",
"mode": "nullable",
},
{
"name": "horz_grate",
"type": "boolean",
"description": "Tree grates present",
"mode": "nullable",
},
{
"name": "horz_plant",
"type": "boolean",
"description": "Plantings present",
"mode": "nullable",
},
{
"name": "horz_other",
"type": "boolean",
"description": "Other horizontal treatment present",
"mode": "nullable",
},
{
"name": "sidw_crack",
"type": "boolean",
"description": "Cracked sidewalk present",
"mode": "nullable",
},
{
"name": "sidw_raise",
"type": "boolean",
"description": "Raised sidewalk present",
"mode": "nullable",
},
{
"name": "wire_htap",
"type": "boolean",
"description": "Indicates the presence of house tap wires",
"mode": "nullable",
},
{
"name": "wire_prime",
"type": "boolean",
"description": "Indicates the presence of primary wires",
"mode": "nullable",
},
{
"name": "wire_2nd",
"type": "boolean",
"description": "Indicates the presence of secondary wires",
"mode": "nullable",
},
{
"name": "wire_other",
"type": "boolean",
"description": "Indicates the presence of other wires",
"mode": "nullable",
},
{
"name": "inf_canopy",
"type": "boolean",
"description": "Canopy debris present",
"mode": "nullable",
},
{
"name": "inf_guard",
"type": "boolean",
"description": "Choking guard or grate present",
"mode": "nullable",
},
{
"name": "inf_wires",
"type": "boolean",
"description": "Choking wires present",
"mode": "nullable",
},
{
"name": "inf_paving",
"type": "boolean",
"description": "Close paving present",
"mode": "nullable",
},
{
"name": "inf_outlet",
"type": "boolean",
"description": "Electrical outlet present",
"mode": "nullable",
},
{
"name": "inf_shoes",
"type": "boolean",
"description": "Sneakers present",
"mode": "nullable",
},
{
"name": "inf_lights",
"type": "boolean",
"description": "Tree lights present",
"mode": "nullable",
},
{
"name": "inf_other",
"type": "boolean",
"description": "Other infrastructure conflicts present",
"mode": "nullable",
},
{
"name": "trunk_dmg",
"type": "string",
"description": "Describes specific damage or wounds found on the trunk",
"mode": "nullable",
},
{
"name": "zipcode",
"type": "string",
"description": "2005 zipcode that the tree falls in.",
"mode": "nullable",
},
{
"name": "zip_city",
"type": "string",
"description": "City, as derived from the zipcode",
"mode": "nullable",
},
{
"name": "cb_num",
"type": "integer",
"description": "Community Board that the tree falls in.",
"mode": "nullable",
},
{
"name": "borocode",
"type": "integer",
"description": "Borough tree is in, using a one-digit borough code: 1 – Manhattan, 2 – Bronx, 3 – Brooklyn, 4 – Queens, 5 – Staten Island",
"mode": "nullable",
},
{
"name": "boroname",
"type": "string",
"description": "Borough tree is in, full text",
"mode": "nullable",
},
{
"name": "cncldist",
"type": "integer",
"description": "New York City Council District tree point is in.",
"mode": "nullable",
},
{
"name": "st_assem",
"type": "integer",
"description": "State Assembly District tree point is in.",
"mode": "nullable",
},
{
"name": "st_senate",
"type": "integer",
"description": "State Senate District tree point is in.",
"mode": "nullable",
},
{
"name": "nta",
"type": "string",
"description": "nta code for the neighborhood tabulation area the tree point is in, from the 2010 census",
"mode": "nullable",
},
{
"name": "nta_name",
"type": "string",
"description": "Nta name for the neighborhood tabulation area the tree point is in",
"mode": "nullable",
},
{
"name": "boro_ct",
"type": "integer",
"description": "This is the boro_ct identifier for the census tract that the tree point falls into.",
"mode": "nullable",
},
{
"name": "x_sp",
"type": "integer",
"description": "X field",
"mode": "nullable",
},
{
"name": "y_sp",
"type": "integer",
"description": "y field",
"mode": "nullable",
},
{
"name": "objectid_1",
"type": "integer",
"description": "",
"mode": "nullable",
},
{
"name": "location_1",
"type": "string",
"description": "",
"mode": "nullable",
},
{"name": "state", "type": "string", "description": "", "mode": "nullable"},
{
"name": "latitude",
"type": "float",
"description": "",
"mode": "nullable",
},
{
"name": "longitude",
"type": "float",
"description": "",
"mode": "nullable",
},
{
"name": "census_tract",
"type": "integer",
"description": "",
"mode": "nullable",
},
{"name": "bin", "type": "integer", "description": "", "mode": "nullable"},
{"name": "bbl", "type": "integer", "description": "", "mode": "nullable"},
{
"name": "address",
"type": "string",
"description": "",
"mode": "nullable",
},
],
)
tree_census_2005_transform_csv >> load_tree_census_2005_to_bq
| apache-2.0 |
dimkal/mne-python | examples/visualization/plot_evoked_topomap.py | 18 | 1498 | """
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points.
"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
# Tal Linzen <linzen@nyu.edu>
# Denis A. Engeman <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
# set time instants in seconds (from 50 to 150 ms in steps of 10 ms)
times = np.arange(0.05, 0.15, 0.01)
# If times is set to None, only 10 regularly spaced topographies will be shown
# plot magnetometer data as topomaps
evoked.plot_topomap(times, ch_type='mag')
# compute a 50 ms bin to stabilize topographies
evoked.plot_topomap(times, ch_type='mag', average=0.05)
# plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad')
# plot magnetometer data as topomap at 1 time point : 100 ms
# and add channel labels and title
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
| bsd-3-clause |
tbullmann/heuhaufen | tools/evaluate.py | 1 | 7259 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import os
import argparse
import glob
from sklearn.metrics.cluster import adjusted_rand_score, adjusted_mutual_info_score
import pandas as pd
import numpy as np
from skimage.io import imread
from skimage.measure import label as regions
from scipy.sparse import csr_matrix
from matplotlib import pyplot as plt
def segmentation_metrics(true_label, pred_label):
"""
    Implements pixel-wise classic (adjusted for chance) RAND and MI scores.
"""
RAND_label = adjusted_rand_score(true_label.ravel(), pred_label.ravel())
MI_label = adjusted_mutual_info_score(true_label.ravel(), pred_label.ravel())
return RAND_label, MI_label
def SNEMI3D_metrics(true_segm, pred_segm):
"""
    Implements segmentation-wise scores from the SNEMI3D challenge.
"""
n = true_segm.size
overlap = Counter(zip(true_segm.ravel(), pred_segm.ravel()))
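    # `overlap` is the joint pixel-count histogram (contingency table) of (true, predicted) segment ids;
    # id 0 corresponds to the background produced by `regions(..., background=...)` in the callers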
data = overlap.values()
row_ind, col_ind = zip(*overlap.keys())
p_ij = csr_matrix((data, (row_ind, col_ind)))
a_i = np.array(p_ij[1:, :].sum(axis=1))
b_j = np.array(p_ij[1:, 1:].sum(axis=0))
p_i0 = p_ij[1:, 0]
p_ij = p_ij[1:, 1:]
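    # a_i / b_j are per-segment pixel counts for the true / predicted segmentations (true background row
    # dropped); the p_i0 terms appear to treat pixels predicted as background as singleton segments,
    # following the SNEMI3D adapted Rand convention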
sumA = (a_i * a_i).sum()
sumB = (b_j * b_j).sum() + p_i0.sum()/n
sumAB = p_ij.multiply(p_ij).sum() + p_i0.sum()/n
RAND_index = 1 - (sumA + sumB - 2*sumAB) / (n ** 2)
precision = sumAB / sumB
recall = sumAB / sumA
F_score = 2.0 * precision * recall / (precision + recall)
adapted_RAND_error = 1.0 - F_score
return RAND_index, precision, recall, F_score, adapted_RAND_error
def test():
inp_path = '../temp/Example_2D_3Labels/test/images/02-inputs.png'
pred_path = '../temp/Example_2D_3Labels/test/images/02-outputs.png'
true_path = '../temp/Example_2D_3Labels/test/images/02-targets.png'
print ('Evaluate prediction %s vs truth %s' % (pred_path, true_path))
channel = 0
threshold = 0.5
segment_by = 0
true_label = imread(true_path)[:, :, channel] > threshold
pred_label = imread(pred_path)[:, :, channel] > threshold
# scores on labels
RAND_label, MI_label = segmentation_metrics(true_label, pred_label)
print("RAND_label = %1.3f, MI_label =%1.3f\n" % (RAND_label, MI_label))
# scores on segmentation into regions
true_segm = regions(true_label, background=segment_by)
pred_segm = regions(pred_label, background=segment_by)
RAND, precision, recall, F_score, adapted_RAND_error = SNEMI3D_metrics(true_segm, pred_segm)
print("RAND = %1.3f, precision = %1.3f, recall = %1.3f, F_score = %1.3f, adapted_RAND_error = %1.3f"
% (RAND, precision, recall, F_score, adapted_RAND_error))
plt.figure(figsize=(8,5.5))
plt.subplot(231)
plt.imshow(imread(inp_path, as_grey=True), cmap='gray')
plt.title("input")
plt.axis('off')
plt.subplot(232)
plt.imshow(pred_label, cmap='gray')
plt.title("predicted label")
plt.axis('off')
plt.subplot(233)
plt.imshow(true_label, cmap='gray')
plt.title("true label")
plt.axis('off')
plt.subplot(235)
plt.imshow(pred_segm)
plt.title("predicted segmentation")
plt.axis('off')
plt.subplot(236)
plt.imshow(true_segm)
plt.title("true segmentation")
plt.axis('off')
plt.subplots_adjust(left=0.01, right=0.99, top=0.95, bottom=0.01)
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input", default=None, help="path/files for raw images (for plot)")
parser.add_argument("--predicted", required=True, help="path/files for predicted labels")
parser.add_argument("--true", required=True, help="path/files for true labels")
parser.add_argument("--output", required=True, help="output path/files")
parser.add_argument("--threshold", type=int, default=127, help="threshold for the predicted label")
parser.add_argument("--channel", type=int, default=0, help="channel to be evaluated")
parser.add_argument("--segment_by", type=int, default=0, help="border value for segmentation into regions")
parser.add_argument("--plot", default="nothing", choices=["nothing", "show", "save"])
a = parser.parse_args()
dst = []
output_path = a.output
def relpath(image_path):
return os.path.relpath(image_path, os.path.split(output_path)[0])
inp_paths = sorted(glob.glob(a.input)) if a.input else []
pred_paths = sorted(glob.glob(a.predicted))
true_paths = sorted(glob.glob(a.true))
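    # Python 2 idiom: `map(None, ...)` zips the three lists, padding the shorter ones with None
    # (comparable to itertools.izip_longest)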
for index, (inp_path, pred_path, true_path) in enumerate(map(None, inp_paths, pred_paths, true_paths)):
print ('Evaluate prediction %s vs truth %s' % (pred_path, true_path))
        # load images, e.g. 0 = black = membrane, 1 = white = non-membrane
# threshold with default 0.5, so that 1 = membrane/border and 0 is non-membrane/region
true_label = imread(true_path)[:, :, a.channel] > a.threshold
pred_label = imread(pred_path)[:, :, a.channel] > a.threshold
# scores on labels
RAND_label, MI_label = segmentation_metrics(true_label, pred_label)
print("RAND_label = %1.3f, MI_label =%1.3f\n" % (RAND_label, MI_label))
        # scores on segmentation into regions
true_segm = regions(true_label, background=a.segment_by)
pred_segm = regions(pred_label, background=a.segment_by)
RAND, precision, recall, F_score, adapted_RAND_error = SNEMI3D_metrics(true_segm, pred_segm)
print("RAND = %1.3f, precision = %1.3f, recall = %1.3f, F_score = %1.3f, adapted_RAND_error = %1.3f"
% (RAND, precision, recall, F_score, adapted_RAND_error))
dst.append([relpath(pred_path), relpath(true_path), RAND_label, MI_label, RAND, precision, recall, F_score, adapted_RAND_error])
# plotting
if not a.plot == "nothing":
plt.figure(figsize=(8, 5.5))
if inp_path:
plt.subplot(231)
plt.imshow(imread(inp_path, as_grey=True), cmap='gray')
plt.title("input")
plt.axis('off')
plt.subplot(232)
plt.imshow(pred_label, cmap='gray')
plt.title("predicted label")
plt.axis('off')
plt.subplot(233)
plt.imshow(true_label, cmap='gray')
plt.title("true label")
plt.axis('off')
plt.subplot(235)
plt.imshow(pred_segm)
plt.title("predicted segmentation")
plt.axis('off')
plt.subplot(236)
plt.imshow(true_segm)
plt.title("true segmentation")
plt.axis('off')
plt.subplots_adjust(left=0.01, right=0.99, top=0.95, bottom=0.01)
if a.plot == "save":
plt.savefig(output_path+"-sample%d.jpg" % index)
elif a.plot == "show":
plt.show()
dst = pd.DataFrame(dst,
columns=['pred_path', 'true_path', 'RAND_label', 'MI_label', 'RAND', 'precision', 'recall', 'F_score', 'adapted_RAND_error'])
dst['sample'] = dst.index
dst.to_csv(output_path, index=False)
print ("Saved to %s" % output_path)
# test()
main()
| mit |
davidam/python-examples | scikit/plot_cluster_comparison.py | 45 | 6620 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
has been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.
The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch in the parameter values and the
data structure.
While these examples give some intuition about the
algorithms, this intuition might not apply to very high
dimensional data.
"""
print(__doc__)
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but small enough to keep running times reasonable
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropically distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2}),
(aniso, {'eps': .15, 'n_neighbors': 2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| gpl-3.0 |
GoogleCloudPlatform/public-datasets-pipelines | datasets/idc/pipelines/_images/generate_bq_views/script.py | 1 | 4037 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import typing
import google.api_core
import google.auth
import google.auth.impersonated_credentials
from google.cloud import bigquery
BQ_OAUTH_SCOPE = "https://www.googleapis.com/auth/bigquery"
def main(
source_project: str,
target_project: str,
bq_datasets: typing.List[str],
service_account: str,
):
default_creds, pid = google.auth.default()
print(f"Obtained default credentials for the project {pid}")
credentials = google.auth.impersonated_credentials.Credentials(
source_credentials=default_creds,
target_principal=service_account,
target_scopes=[BQ_OAUTH_SCOPE],
)
client = bigquery.Client(credentials=credentials)
for dataset in bq_datasets:
logging.info(f"Generating views for {dataset}..")
tables = client.list_tables(f"{source_project}.{dataset}")
source_views = []
for table in tables:
if table.table_type == "TABLE":
continue
source_view = client.get_table(
f"{source_project}.{dataset}.{table.table_id}"
)
create_or_update_view(client, source_view, source_project, target_project)
source_views.append(table.table_id)
sync_views(client, dataset, source_views, target_project)
def create_or_update_view(
client: bigquery.Client,
source_view: bigquery.Table,
source_project: str,
target_project: str,
) -> None:
try:
target_view = client.get_table(
f"{target_project}.{source_view.dataset_id}.{source_view.table_id}"
)
except google.api_core.exceptions.NotFound:
target_view = None
_view = bigquery.Table(
f"{target_project}.{source_view.dataset_id}.{source_view.table_id}"
)
_view.description = source_view.description
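    # rewrite project references inside the view SQL so the copied view points at the target project's tables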
_view.view_query = source_view.view_query.replace(source_project, target_project)
# Create the view if it doesn't exist. Otherwise, update it.
if not target_view:
view = client.create_table(_view)
logging.info(f"View {view.full_table_id} successfully created.")
else:
view = client.update_table(_view, ["view_query", "description"])
logging.info(f"View {view.full_table_id} successfully updated.")
def sync_views(
client: bigquery.Client,
dataset: str,
source_views: typing.List[str],
target_project: str,
) -> None:
"""Syncs views between source and target BQ datasets.
If a view exists in the target dataset but not in the source dataset, that
view must be deleted from the target dataset.
"""
target_tables = client.list_tables(f"{target_project}.{dataset}")
for target_table in target_tables:
if not target_table.table_type == "VIEW":
continue
if target_table.table_id not in source_views:
logging.info(
f"Extra view {target_project}.{dataset}.{target_table.table_id} will be deleted."
)
client.delete_table(
f"{target_project}.{dataset}.{target_table.table_id}", not_found_ok=True
)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_project=os.environ["SOURCE_PROJECT_ID"],
target_project=os.environ["TARGET_PROJECT_ID"],
bq_datasets=json.loads(os.environ["BQ_DATASETS"]),
service_account=os.environ["SERVICE_ACCOUNT"],
)
| apache-2.0 |
laserson/eggo | eggo/cli/datasets.py | 3 | 2199 | # Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from click import group, option, File
from eggo import operations
@group(context_settings={'help_option_names': ['-h', '--help']})
def main():
"""eggo-data -- operations on common genomics datasets"""
pass
@main.command()
@option('--input', help='Path to datapackage.json file for dataset')
@option('--output', help='Fully-qualified HDFS destination path')
def dnload_raw(input, output):
"""Parallel download raw dataset from datapackage.json using Hadoop"""
with open(input) as ip:
datapackage = json.load(ip)
operations.download_dataset_with_hadoop(datapackage, output)
@main.command()
@option('--cm-host', help='Hostname for Cloudera Manager')
@option('--cm-port', default=7180, show_default=True,
help='Port for Cloudera Manager')
@option('--username', default='admin', show_default=True, help='CM username')
@option('--password', default='admin', show_default=True, help='CM password')
@option('--output', type=File(mode='w'), default='-', show_default=True,
help='Output destination ("-" for stdout)')
def gen_env_vars(cm_host, cm_port, username, password, output):
"""Generate env vars required for eggo scripts to run"""
env_vars = operations.generate_eggo_env_vars(cm_host, cm_port, username,
password)
for (k, v) in env_vars.iteritems():
output.write("export {0}={1}\n".format(k, v))
| apache-2.0 |
Hellybean/SaberMod_ROM_Toolchain | libstdc++-v3/scripts/make_graph.py | 172 | 9251 | #!/usr/bin/python
import string
import sys
import re
from Numeric import *
from pychart import *
from xml.dom import minidom
class exception:
pass
class res:
"""
A 'structure' representing the results of a test.
"""
def __init__(self, x_label, y_label, cntnr_list, cntnr_descs, res_sets):
self.x_label = x_label
self.y_label = y_label
self.cntnr_list = cntnr_list
self.cntnr_descs = cntnr_descs
self.res_sets = res_sets
class res_getter:
"""
This class returns a res object for some test.
"""
class __sorter:
def __accum(self, results):
total = 0
for result in results:
total = total + result[1]
return total
def sort(self, cntnr_list, res_sets):
cntnrs_and_totals = []
for cntnr in cntnr_list:
results = res_sets[cntnr]
total = self.__accum(results)
cntnrs_and_totals.append((cntnr, total))
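            # Python 2 cmp-style comparator: orders containers by descending accumulated total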
by_total = lambda x,y: x[1] > y[1] and -1 or 1
cntnrs_and_totals.sort(by_total)
ret = []
for cntnr_and_total in cntnrs_and_totals:
cntnr = cntnr_and_total[0]
ret.append(cntnr)
return ret
def __init__(self, test_infos_f_name):
self.__test_to_container_res_sets = {}
self.__test_to_f_names = {}
tests_dat = minidom.parse(test_infos_f_name)
for test in tests_dat.getElementsByTagName('test'):
test_name = test.attributes['name'].value
self.__test_to_f_names[test_name] = test.getElementsByTagName('file')[0].attributes['name'].value
cntnr_list = []
for cntnr in test.getElementsByTagName('cntnr'):
cntnr_list.append(cntnr.attributes['name'].value)
self.__test_to_container_res_sets[test_name] = cntnr_list
def __get_label(self, tst_dat, label_name):
label = tst_dat.getElementsByTagName(label_name)[0].firstChild.data
label = string.strip(label, '\n')
label = string.strip(label)
return label
def __parse_result_sets(self, f_name, cntnr_list):
tst_dat = minidom.parse(f_name)
x_label = self.__get_label(tst_dat, 'x_name')
y_label = self.__get_label(tst_dat, 'y_name')
parsed_container_list = tst_dat.getElementsByTagName('cntnr')
res_sets = {}
cntnr_descs = {}
for cntnr in parsed_container_list:
cntnr_name = cntnr.attributes["name"].value
res_sets[cntnr_name] = []
for cntnr in parsed_container_list:
cntnr_name = cntnr.attributes["name"].value
cntnr_desc = cntnr.getElementsByTagName('desc')
if res_sets.has_key(cntnr_name):
res_set = []
result_list = cntnr.getElementsByTagName('result')
for result in result_list:
x = string.atol(result.attributes["x"].value)
y = string.atof(result.attributes["y"].value)
res_set.append((x, y))
res_sets[cntnr_name] = res_set
cntnr_descs[cntnr_name] = cntnr_desc[0]
return (x_label, y_label, cntnr_descs, res_sets)
def get(self, res_dir, test_name):
cntnr_list = self.__test_to_container_res_sets[test_name]
f_name = res_dir + '/' + self.__test_to_f_names[test_name]
parsed = self.__parse_result_sets(f_name, cntnr_list)
x_label = parsed[0]
y_label = parsed[1]
cntnr_descs = parsed[2]
res_sets = parsed[3]
cntnr_list = self.__sorter().sort(cntnr_list, res_sets)
return res(x_label, y_label, cntnr_list, cntnr_descs, res_sets)
class image_maker:
"""
    This class creates an SVG file from a result set.
"""
class __style_chooser:
def __init__(self):
self.native_re = re.compile(r'n_(?:.*?)')
self.native_tick_mark_0 = tick_mark.blackdtri
self.native_tick_mark_1 = tick_mark.blackdia
self.native_line_style_0 = line_style.gray50_dash1
self.native_line_style_1 = line_style.gray50_dash2
self.mask_re = re.compile(r'mask(?:.*?)')
self.mod_re = re.compile(r'mod(?:.*?)')
self.rb_tree_mmap_rb_tree_set_re = re.compile(r'rb_tree_mmap_rb_tree_set(?:.*?)')
self.rb_tree_mmap_lu_mtf_set_re = re.compile(r'rb_tree_mmap_lu_mtf_set(?:.*?)')
self.splay_re = re.compile(r'splay(?:.*?)')
self.rb_tree_re = re.compile(r'rb_tree(?:.*?)')
self.ov_tree_re = re.compile(r'ov_tree(?:.*?)')
self.splay_tree_re = re.compile(r'splay_tree(?:.*?)')
self.pat_trie_re = re.compile(r'pat_trie(?:.*?)')
self.lc_1div8_1div2_re = re.compile(r'lc_1div8_1div2(?:.*?)')
self.lc_1div8_1div1_re = re.compile(r'lc_1div8_1div1(?:.*?)')
self.mcolc_1div2_re = re.compile(r'mcolc_1div2(?:.*?)')
def choose(self, cntnr):
if self.native_re.search(cntnr):
if cntnr == 'n_pq_vector':
return (self.native_tick_mark_1, self.native_line_style_1)
return (self.native_tick_mark_0, self.native_line_style_0)
# tick_mark predefined
# square, circle3, dia, tri, dtri, star, plus5, x5, gray70dia, blackdtri, blackdia
if self.mask_re.search(cntnr):
clr = color.navy
elif self.mod_re.search(cntnr):
clr = color.green4
elif self.rb_tree_mmap_rb_tree_set_re.search(cntnr):
clr = color.mediumblue
tm = tick_mark.square
elif self.rb_tree_mmap_lu_mtf_set_re.search(cntnr) or cntnr == 'rc_binomial_heap':
clr = color.gray50
tm = tick_mark.dia
elif self.splay_tree_re.search(cntnr) or cntnr == 'binomial_heap':
clr = color.gray58
tm = tick_mark.tri
elif self.rb_tree_re.search(cntnr) or cntnr == 'binary_heap':
clr = color.red3
tm = tick_mark.dtri
elif self.ov_tree_re.search(cntnr) or cntnr == 'thin_heap':
clr = color.orangered1
tm = tick_mark.star
elif self.pat_trie_re.search(cntnr) or cntnr == 'pairing_heap':
clr = color.blueviolet
tm = tick_mark.plus5
else:
sys.stderr.write(cntnr + '\n')
raise exception
# mask / mod
if cntnr.find('lc_1div8_1div') <> -1:
if cntnr.find('mask') <> -1:
# mask
if self.lc_1div8_1div2_re.search(cntnr):
if cntnr.find('nsth') <> -1:
tm = tick_mark.x5
else:
tm = tick_mark.gray70dia
if self.lc_1div8_1div1_re.search(cntnr):
if cntnr.find('nsth') <> -1:
tm = tick_mark.dia
else:
tm = tick_mark.circle3
else:
# mod
if self.lc_1div8_1div2_re.search(cntnr):
if cntnr.find('nsth') <> -1:
tm = tick_mark.tri
else:
tm = tick_mark.square
if self.lc_1div8_1div1_re.search(cntnr):
if cntnr.find('nsth') <> -1:
tm = tick_mark.dtri
else:
tm = tick_mark.star
if self.mcolc_1div2_re.search(cntnr):
tm = tick_mark.circle3
return (tm, line_style.T(color = clr, width = 2))
def __init__(self):
self.__sc = self.__style_chooser()
self.__mmap_re = re.compile('mmap_')
def __container_label_name(self, cntnr):
return self.__mmap_re.sub('\nmmap_\n', cntnr)
def make(self, res, of_name):
print of_name
# theme settings
theme.debug_level = 3
theme.output_format = 'svg'
theme.scale_factor = 2
theme.default_line_width = 0.5
theme.default_font_size = 8
theme.use_color = 1
theme.reinitialize()
# canvas settings
f = file(of_name, "w")
can = canvas.init(f, "svg")
# axes
y_tick_interval = self.__get_y_tics(res)
xaxis = axis.X(format = "/6/i/a-90{}%d",
tic_interval = 200,
label = res.x_label, label_offset = (0, -20))
yaxis = axis.Y(format = "/6/i/a0{}%.2e",
tic_interval = y_tick_interval, tic_label_offset = (-25, 0),
label = res.y_label, label_offset = (-15, 0))
# legend
legend_lines = len(res.cntnr_list)
legend_vloc = 80 + (legend_lines * 10)
legend_hloc = -0
lg = legend.T(loc=(legend_hloc,-legend_vloc),
frame_line_style = None, inter_row_sep = 2)
# plot datasets
ar = area.T(x_axis = xaxis, y_axis = yaxis, legend = lg, size = (240,110), x_range = (0, 2200))
plot_list = []
for cntnr in res.cntnr_list:
style = self.__sc.choose(cntnr)
pl = line_plot.T(label = self.__container_label_name(cntnr),
data = res.res_sets[cntnr],
tick_mark = style[0],
line_style = style[1])
plot_list.append(pl)
for plot in plot_list:
ar.add_plot(plot)
# render image
ar.draw(can)
can.close()
def __get_y_max_min(self, res):
mx = 0
nx = 0
for cntnr in res.cntnr_list:
m = max(d[1] for d in res.res_sets[cntnr])
mx = max(m, mx)
n = min(d[1] for d in res.res_sets[cntnr])
nx = min(n, nx)
return (mx, nx)
def __get_x_max_min(self, res):
mx = 0
nx = 0
for cntnr in res.cntnr_list:
m = max(d[0] for d in res.res_sets[cntnr])
mx = max(m, mx)
n = min(d[0] for d in res.res_sets[cntnr])
nx = min(n, nx)
return (mx, nx)
def __get_y_tics(self, res):
mx = 0
for cntnr in res.cntnr_list:
m = max(d[1] for d in res.res_sets[cntnr])
mx = max(m, mx)
return mx / 5
def main(test_infos_f_name, res_dir, doc_dir):
xmls_dat = minidom.parse(test_infos_f_name)
for test in xmls_dat.getElementsByTagName('test'):
# parse results
test_name = test.attributes['name'].value
res_gtr = res_getter(test_infos_f_name)
res = res_gtr.get(res_dir, test_name)
# generate image
image_mkr = image_maker()
svg_of_name = doc_dir + '/pbds_' + test_name + '.svg'
image_mkr.make(res, svg_of_name)
if __name__ == "__main__":
"""
This module takes 3 parameters from the command line:
Tests info XML file name
Test results directory
Image output directory
"""
usg = "make_graph.py <test_info_file> <res_dir> <image_dir>\n"
if len(sys.argv) != 4:
sys.stderr.write(usg)
raise exception
main(sys.argv[1], sys.argv[2], sys.argv[3])
| gpl-2.0 |
yonglehou/scikit-learn | sklearn/utils/metaestimators.py | 281 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
dzimmerer/pytorchcodehelpers | pytorchcodehelpers/pytcodetool.py | 1 | 1291 | import sys
import linecache
from pytorchcodehelpers.pytorchsize import InspectNet
if __name__ == '__main__':
rel_path = sys.argv[1]
file_path = sys.argv[2]
line_start = int(sys.argv[3])
line_end = int(sys.argv[4])
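    # derive an importable module name from the relative path (e.g. pkg/mod.py -> pkg.mod) for the star import below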
file_name = rel_path.replace("/" , ".")[:-3]
import_str = "from " + file_name + " import *"
lines = []
for i in range(line_start, line_end + 1):
lines.append(linecache.getline(file_path, i))
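    # linecache keeps each line's trailing newline, so a plain join reproduces the selected snippet verbatim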
lines = "".join(lines)
inpt_size = input("Give the input size (default: 1 3 128 128) : ")
inpt_size = inpt_size if inpt_size else "1 3 128 128"
inpt_size = tuple(map(int, inpt_size.split(" ")))
net_inspect = InspectNet(input_size=(inpt_size), pre_exec_str=import_str)
net_inspect.inspect_net(lines)
    get_class_answer = input("Get automatically generated class? (default: N) : ")
    get_class_answer = get_class_answer if get_class_answer else "N"
    if get_class_answer.lower() == "y" or get_class_answer.lower() == "yes":
m_class_name = input("Class name ? (default: NewModule) : ")
m_class_name = m_class_name if m_class_name else "NewModule"
net_inspect.name = m_class_name
print("\n")
print(net_inspect.get_class_str())
# print(lines)
print("\nDone.")
| apache-2.0 |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 133 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
yonglehou/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 257 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 257 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
benoitsteiner/tensorflow-xsmm | tensorflow/python/data/kernel_tests/map_dataset_op_test.py | 5 | 27469 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import warnings
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn, num_parallel_calls=num_parallel_calls)
.prefetch(output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(
components, count, num_parallel_calls, output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_parallel_calls_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
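  # With num_parallel_calls set but no explicit output buffer, the map should
  # still yield the leading finite elements; the NaN element is never consumed.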
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
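  # Same failure pattern as testParallelMapError, but the error surfaces from a
  # prefetch buffer placed after a sequential map.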
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
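  # The map function can capture and dequeue from an external FIFOQueue; every
  # enqueued element should be produced before OutOfRangeError is raised.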
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
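  # Two queue handles created with the same shared_name refer to one underlying
  # queue, so each produced element pairs two consecutive values drained from it.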
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
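  # A captured resource variable is incremented once per produced element.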
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
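  # Reading a captured variable that was never initialized should fail with
  # NotFoundError when the first element is requested.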
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
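  # A seeded random op inside map() must produce varying values within one pass
  # but an identical sequence after the iterator is re-initialized.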
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
      # Randomness is repeatable given the same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
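  # Returning a Python list from the map function is treated as a
  # multi-component element in this test.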
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (dataset_ops.Dataset.range(10)
.map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
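  # A StopIteration raised inside the py_func is reported as OutOfRangeError,
  # ending iteration after the first 100 elements even with parallel calls.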
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
iterator = (
dataset_ops.Dataset.range(105)
.map(lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(100):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
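  # Constant (non-tensor) return values are converted to tensors and emitted
  # alongside the mapped input element.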
def testConstantOutput(self):
iterator = (
dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, b"hello", 10), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testWarnOnLookupTable(self):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer([], []), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating lookup tables inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
def testNestedDatasetError(self):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
NotImplementedError, r"The Dataset.map\(\) transformation does not "
"currently support nested datasets as outputs."):
_ = dataset.map(dataset_ops.Dataset.from_tensor_slices)
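# The microbenchmarks below measure per-element latency for chains of map()
# calls and for map() functions with increasingly wide (fan-out) outputs.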
class MapDatasetBenchmark(test.Benchmark):
def benchmarkChainOfMaps(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset chain length: %d Median wall time: %f"
% (chain_length, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_chain_latency_%d" % chain_length)
def benchmarkMapFanOut(self):
fan_outs = [1, 2, 5, 10, 20, 50, 100]
for fan_out in fan_outs:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(
tuple(0 for _ in range(fan_out))).repeat(None).map(lambda *xs: xs)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element[0].op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element[0].op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset fan out: %d Median wall time: %f"
% (fan_out, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_fan_out_%d" % fan_out)
if __name__ == "__main__":
test.main()
| apache-2.0 |
dimkal/mne-python | mne/viz/tests/test_decoding.py | 10 | 3823 | # Authors: Denis Engemann <denis.engemann@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_raises, assert_equals
import numpy as np
from mne.epochs import equalize_epoch_counts, concatenate_epochs
from mne.decoding import GeneralizationAcrossTime
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn, run_tests_if_main
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
warnings.simplefilter('always') # enable b/c these tests throw warnings
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
"""Aux function for testing GAT viz"""
gat = GeneralizationAcrossTime()
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
decim = 30
# Test on time generalization within one condition
with warnings.catch_warnings(record=True):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, decim=decim)
epochs_list = [epochs[k] for k in event_id]
equalize_epoch_counts(epochs_list)
epochs = concatenate_epochs(epochs_list)
# Test default running
gat = GeneralizationAcrossTime(test_times=test_times)
gat.fit(epochs)
gat.score(epochs)
return gat
@requires_sklearn
def test_gat_plot_matrix():
"""Test GAT matrix plot"""
gat = _get_data()
gat.plot()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_diagonal():
"""Test GAT diagonal plot"""
gat = _get_data()
gat.plot_diagonal()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_times():
"""Test GAT times plot"""
gat = _get_data()
# test one line
gat.plot_times(gat.train_times_['times'][0])
# test multiple lines
gat.plot_times(gat.train_times_['times'])
# test multiple colors
n_times = len(gat.train_times_['times'])
colors = np.tile(['r', 'g', 'b'],
int(np.ceil(n_times / 3)))[:n_times]
gat.plot_times(gat.train_times_['times'], color=colors)
# test invalid time point
assert_raises(ValueError, gat.plot_times, -1.)
    # test that non-float inputs are rejected
assert_raises(ValueError, gat.plot_times, 1)
assert_raises(ValueError, gat.plot_times, 'diagonal')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
def chance(ax):
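    # Extract the y-value of the chance-level line drawn by plot_diagonal().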
return ax.get_children()[1].get_lines()[0].get_ydata()[0]
@requires_sklearn
def test_gat_chance_level():
"""Test GAT plot_times chance level"""
gat = _get_data()
ax = gat.plot_diagonal(chance=False)
ax = gat.plot_diagonal()
assert_equals(chance(ax), .5)
gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
ax = gat.plot_diagonal()
assert_equals(chance(ax), .25)
ax = gat.plot_diagonal(chance=1.234)
assert_equals(chance(ax), 1.234)
assert_raises(ValueError, gat.plot_diagonal, chance='foo')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_nonsquared():
"""Test GAT diagonal plot"""
gat = _get_data(test_times=dict(start=0.))
gat.plot()
ax = gat.plot_diagonal()
scores = ax.get_children()[1].get_lines()[2].get_ydata()
assert_equals(len(scores), len(gat.estimators_))
run_tests_if_main()
| bsd-3-clause |